diff --git a/.github/workflows/static_tests.yml b/.github/workflows/static_tests.yml index 7f2cc313d..8dfe3fe63 100644 --- a/.github/workflows/static_tests.yml +++ b/.github/workflows/static_tests.yml @@ -57,4 +57,4 @@ jobs: - name: "Run flake8..." run: | - flake8 . + flake8 . diff --git a/.gitignore b/.gitignore index 61f7e3021..3a9bffbe7 100755 --- a/.gitignore +++ b/.gitignore @@ -37,4 +37,4 @@ libtool /.settings/ doxygen python/test/log.txt -.vscode/ \ No newline at end of file +.vscode/ diff --git a/Makefile.am b/Makefile.am index e994ab5f4..e59771e5f 100644 --- a/Makefile.am +++ b/Makefile.am @@ -175,7 +175,7 @@ libnestgpu.la: $(OBJS) lib_LTLIBRARIES = libnestgpu.la -%.cu: +%.cu: clean-local: rm -f *.so obj/*.o diff --git a/README.md b/README.md index c2ccaacf9..932044ca8 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ NEST GPU is a GPU-MPI library for simulation of large-scale networks of spiking neurons. Can be used in Python, in C++ and in C. -With this library it is possible to run relatively fast simulations of large-scale networks of spiking neurons. For instance, on a single Nvidia GeForce RTX 2080 Ti GPU board it is possible to simulate the activity of 1 million multisynapse AdEx neurons with 1000 synapse per neurons, for a total of 1 billion synapse, using the fifth-order Runge-Kutta method with adaptive stepsize as differential equations solver, in little more than 70 seconds per second of neural activity. The MPI communication is also very efficient. +With this library it is possible to run relatively fast simulations of large-scale networks of spiking neurons. For instance, on a single Nvidia GeForce RTX 2080 Ti GPU board it is possible to simulate the activity of 1 million multisynapse AdEx neurons with 1000 synapse per neurons, for a total of 1 billion synapse, using the fifth-order Runge-Kutta method with adaptive stepsize as differential equations solver, in little more than 70 seconds per second of neural activity. The MPI communication is also very efficient. The Python interface is very similar to that of the NEST simulator: the most used commands are practically identical, dictionaries are used to define neurons, connections and synapses properties in the same way. ## Documentation diff --git a/VERSION b/VERSION index ab88d5d8b..537f20aa4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ - 1.08 + 1.08 diff --git a/c++/examples/brunel_mpi.cpp b/c++/examples/brunel_mpi.cpp index fe9335e9f..df7596536 100644 --- a/c++/examples/brunel_mpi.cpp +++ b/c++/examples/brunel_mpi.cpp @@ -49,7 +49,7 @@ int main(int argc, char *argv[]) cout << "Building on host " << mpi_id << " ..." < brunel_mpi_nx\n"; return -1; } - + int mpi_id = neural_gpu.MpiId(); cout << "Building on host " << mpi_id << " ..." <`_. - diff --git a/doc/conf.py b/doc/conf.py index 647a66455..4b27cd924 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -30,9 +30,10 @@ # documentation root, use str(Path().resolve()) to make it absolute. 
# -import sys import json +import sys from pathlib import Path + sys.path.insert(0, str(Path().resolve())) source_dir = Path(__file__).resolve().parent.resolve() @@ -44,15 +45,15 @@ # -- Project information ----------------------------------------------------- -project = u'NEST GPU Documentation' -copyright = u'2004, nest-simulator' -author = u'nest-simulator' +project = "NEST GPU Documentation" +copyright = "2004, nest-simulator" +author = "nest-simulator" # The full version, including alpha/beta/rc tags -release = '1' +release = "1" -source_suffix = '.rst' -master_doc = 'contents' +source_suffix = ".rst" +master_doc = "contents" # -- General configuration --------------------------------------------------- @@ -60,78 +61,83 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx_rtd_theme', - 'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx_tabs.tabs', - 'nbsphinx' + "sphinx_rtd_theme", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx_tabs.tabs", + "nbsphinx", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'manni' +pygments_style = "manni" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), - 'nest': ('https://nest-simulator.readthedocs.io/en/latest/', None), - 'nestml': ('https://nestml.readthedocs.io/en/latest/', None), - 'pynn': ('http://neuralensemble.org/docs/PyNN/', None), - 'elephant': ('https://elephant.readthedocs.io/en/latest/', None), - 'desktop': ('https://nest-desktop.readthedocs.io/en/latest/', None), - 'neuromorph': ('https://electronicvisions.github.io/hbp-sp9-guidebook/', None), - 'arbor': ('https://arbor.readthedocs.io/en/latest/', None), - 'tvb': ('http://docs.thevirtualbrain.org/', None), - 'extmod': ('https://nest-extension-module.readthedocs.io/en/latest/', None), + "python": ("https://docs.python.org/3", None), + "nest": ("https://nest-simulator.readthedocs.io/en/latest/", None), + "nestml": ("https://nestml.readthedocs.io/en/latest/", None), + "pynn": ("http://neuralensemble.org/docs/PyNN/", None), + "elephant": ("https://elephant.readthedocs.io/en/latest/", None), + "desktop": ("https://nest-desktop.readthedocs.io/en/latest/", None), + "neuromorph": ("https://electronicvisions.github.io/hbp-sp9-guidebook/", None), + "arbor": ("https://arbor.readthedocs.io/en/latest/", None), + "tvb": ("http://docs.thevirtualbrain.org/", None), + "extmod": ("https://nest-extension-module.readthedocs.io/en/latest/", None), } # Extract documentation from header files in src/ -from extractor_userdocs import relative_glob, ExtractUserDocs +from extractor_userdocs import ExtractUserDocs, relative_glob + def config_inited_handler(app, config): ExtractUserDocs( listoffiles=relative_glob("../src/*.h", basedir=source_dir), basedir=source_dir, - outdir=str(doc_build_dir) + outdir=str(doc_build_dir), ) + def setup(app): # for events see # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events - app.connect('config-inited', config_inited_handler) + app.connect("config-inited", config_inited_handler) + # -- Options for HTML output 
------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" html_show_sphinx = False html_show_copyright = False # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] html_css_files = [ - 'css/custom.css', - 'css/pygments.css', + "css/custom.css", + "css/pygments.css", ] -html_logo = 'logo/nestgpu-logo.png' -html_theme_options = {'logo_only': True, - 'display_version': True} +html_logo = "logo/nestgpu-logo.png" +html_theme_options = {"logo_only": True, "display_version": True} + def setup(app): - app.connect('config-inited', config_inited_handler) + app.connect("config-inited", config_inited_handler) + + # app.add_css_file('css/custom.css') -# app.add_css_file('css/pygments.css') \ No newline at end of file +# app.add_css_file('css/pygments.css') diff --git a/doc/contents.rst b/doc/contents.rst index 95eec1a05..d0cb79066 100644 --- a/doc/contents.rst +++ b/doc/contents.rst @@ -14,7 +14,7 @@ Table of Contents Guides Examples Model Directory - + .. toctree:: :maxdepth: 2 @@ -22,4 +22,3 @@ Table of Contents Community Publications - diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 4b91fd7cf..3e5e32efe 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -1,4 +1,4 @@ Examples ======== -You can find some Python examples and some C++ examples in the directories `python/examples `_ and `c++/examples `_ respectively. \ No newline at end of file +You can find some Python examples and some C++ examples in the directories `python/examples `_ and `c++/examples `_ respectively. diff --git a/doc/extractor_userdocs.py b/doc/extractor_userdocs.py index e5da5794e..af675d416 100644 --- a/doc/extractor_userdocs.py +++ b/doc/extractor_userdocs.py @@ -19,23 +19,27 @@ # along with NEST GPU. If not, see . import re -from tqdm import tqdm from pprint import pformat + +from tqdm import tqdm + try: - from math import comb # breaks in Python < 3.8 + from math import comb # breaks in Python < 3.8 except ImportError: from math import factorial as fac def comb(n, k): return fac(n) / (fac(k) * fac(n - k)) -import os -import sys + import glob import json -from itertools import chain, combinations import logging +import os +import sys from collections import Counter +from itertools import chain, combinations + logging.basicConfig(level=logging.INFO) log = logging.getLogger() @@ -46,15 +50,10 @@ def relative_glob(*pattern, basedir=os.curdir, **kwargs): # prefix all patterns with basedir and expand names = chain(*[glob.glob(os.path.join(tobase, pat), **kwargs) for pat in pattern]) # remove prefix from all expanded names - return [name[len(tobase)+1:] for name in names] + return [name[len(tobase) + 1 :] for name in names] -def UserDocExtractor( - filenames, - basedir="..", - replace_ext='.rst', - outdir="userdocs/" - ): +def UserDocExtractor(filenames, basedir="..", replace_ext=".rst", outdir="userdocs/"): """ Extract all user documentation from given files. This method searches for "BeginUserDocs" and "EndUserDocs" keywords and @@ -88,10 +87,10 @@ def UserDocExtractor( mapping tags to lists of documentation filenames (relative to `outdir`). 
""" if not os.path.exists(outdir): - log.info("creating output directory "+outdir) + log.info("creating output directory " + outdir) os.mkdir(outdir) - userdoc_re = re.compile(r'BeginUserDocs:?\s*(?P([\w -]+(,\s*)?)*)\n+(?P(.|\n)*)EndUserDocs') - tagdict = dict() # map tags to lists of documents + userdoc_re = re.compile(r"BeginUserDocs:?\s*(?P([\w -]+(,\s*)?)*)\n+(?P(.|\n)*)EndUserDocs") + tagdict = dict() # map tags to lists of documents nfiles_total = 0 with tqdm(unit="files", total=len(filenames)) as progress: for filename in filenames: @@ -100,16 +99,16 @@ def UserDocExtractor( log.warning("extracting user documentation from %s...", filename) nfiles_total += 1 match = None - with open(os.path.join(basedir, filename), 'r', encoding='utf8') as infile: + with open(os.path.join(basedir, filename), "r", encoding="utf8") as infile: match = userdoc_re.search(infile.read()) if not match: log.warning("No user documentation found in " + filename) continue outname = os.path.basename(os.path.splitext(filename)[0]) + replace_ext - tags = [t.strip() for t in match.group('tags').split(',')] + tags = [t.strip() for t in match.group("tags").split(",")] for tag in tags: tagdict.setdefault(tag, list()).append(outname) - doc = match.group('doc') + doc = match.group("doc") try: doc = rewrite_short_description(doc, filename) except ValueError as e: @@ -128,7 +127,7 @@ def UserDocExtractor( def rewrite_short_description(doc, filename, short_description="Short description"): - ''' + """ Modify a given text by replacing the first section named as given in `short_description` by the filename and content of that section. Parameters @@ -144,31 +143,27 @@ def rewrite_short_description(doc, filename, short_description="Short descriptio ------- str original parameter doc with short_description section replaced - ''' + """ titles = getTitles(doc) if not titles: raise ValueError("No sections found in '%s'!" % filename) name = os.path.splitext(os.path.basename(filename))[0] - for title, nexttitle in zip(titles, titles[1:]+[None]): + for title, nexttitle in zip(titles, titles[1:] + [None]): if title.group(1) != short_description: continue secstart = title.end() secend = len(doc) + 1 # last section ends at end of document if nexttitle: secend = nexttitle.start() - sdesc = doc[secstart:secend].strip().replace('\n', ' ') + sdesc = doc[secstart:secend].strip().replace("\n", " ") fixed_title = "%s – %s" % (name, sdesc) - return ( - doc[:title.start()] + - fixed_title + "\n" + "=" * len(fixed_title) + "\n\n" + - doc[secend:] - ) + return doc[: title.start()] + fixed_title + "\n" + "=" * len(fixed_title) + "\n\n" + doc[secend:] raise ValueError("No section '%s' found in %s!" % (short_description, filename)) def rewrite_see_also(doc, filename, tags, see_also="See also"): - ''' + """ Replace the content of a section named `see_also` in the document `doc` with links to indices of all its tags. The original content of the section -if not empty- will discarded and @@ -189,14 +184,14 @@ def rewrite_see_also(doc, filename, tags, see_also="See also"): ------- str original parameter doc with see_also section replaced - ''' + """ titles = getTitles(doc) if not titles: raise ValueError("No sections found in '%s'!" % filename) def rightcase(text): - ''' + """ Make text title-case except for acronyms, where an acronym is identified simply by being all upper-case. 
This function operates on the whole string, so a text with mixed @@ -211,27 +206,32 @@ def rightcase(text): str original text with poentially different characters being upper-/lower-case. - ''' + """ if text != text.upper(): return text.title() # title-case any tag that is not an acronym - return text # return acronyms unmodified + return text # return acronyms unmodified - for title, nexttitle in zip(titles, titles[1:]+[None]): + for title, nexttitle in zip(titles, titles[1:] + [None]): if title.group(1) != see_also: continue secstart = title.end() secend = len(doc) + 1 # last section ends at end of document if nexttitle: secend = nexttitle.start() - original = doc[secstart:secend].strip().replace('\n', ' ') + original = doc[secstart:secend].strip().replace("\n", " ") if original: - log.warning("dropping manual 'see also' list in %s user docs: '%s'", filename, original) - return ( - doc[:secstart] + - "\n" + ", ".join([":doc:`{taglabel} `".format(tag=tag, taglabel=rightcase(tag)) - for tag in tags]) + "\n\n" + - doc[secend:] + log.warning( + "dropping manual 'see also' list in %s user docs: '%s'", + filename, + original, ) + return ( + doc[:secstart] + + "\n" + + ", ".join([":doc:`{taglabel} `".format(tag=tag, taglabel=rightcase(tag)) for tag in tags]) + + "\n\n" + + doc[secend:] + ) raise ValueError("No section '%s' found in %s!" % (see_also, filename)) @@ -277,11 +277,11 @@ def make_hierarchy(tags, *basetags): if tree.values(): remaining = baseitems.difference(set.union(*tree.values())) if remaining: - tree[''] = remaining + tree[""] = remaining return {basetags: tree} -def rst_index(hierarchy, current_tags=[], underlines='=-~', top=True): +def rst_index(hierarchy, current_tags=[], underlines="=-~", top=True): """ Create an index page from a given hierarchical dict of documents. The given `hierarchy` is pretty-printed and returned as a string. @@ -303,14 +303,13 @@ def rst_index(hierarchy, current_tags=[], underlines='=-~', top=True): str formatted pretty index. 
""" + def mktitle(t, ul, link=None): text = t if t != t.upper(): text = t.title() # title-case any tag that is not an acronym - title = ':doc:`{text} <{filename}>`'.format( - text=text, - filename=link or "index_"+t) - text = title+'\n'+ul*len(title)+'\n' + title = ":doc:`{text} <{filename}>`".format(text=text, filename=link or "index_" + t) + text = title + "\n" + ul * len(title) + "\n" return text def mkitem(t): @@ -322,19 +321,18 @@ def mkitem(t): if len(hierarchy.keys()) == 1: page_title += ": " + ", ".join(current_tags) output.append(page_title) - output.append(underlines[0]*len(page_title)+"\n") + output.append(underlines[0] * len(page_title) + "\n") if len(hierarchy.keys()) != 1: underlines = underlines[1:] for tags, items in sorted(hierarchy.items()): - if "NOINDEX" in tags: continue if isinstance(tags, str): title = tags else: title = " & ".join(tags) - if title and not len(hierarchy) == 1: # not print title if already selected by current_tags + if title and not len(hierarchy) == 1: # not print title if already selected by current_tags output.append(mktitle(title, underlines[0])) if isinstance(items, dict): output.append(rst_index(items, current_tags, underlines[1:], top=False)) @@ -388,15 +386,19 @@ def CreateTagIndices(tags, outdir="userdocs/"): for tag, count in sorted([(tag, len(lst)) for tag, lst in tags.items()], key=lambda x: x[1]): log.info(" %%%ds tag in %%d files" % maxtaglen, tag, count) if "" in taglist: - taglist.remove('') + taglist.remove("") indexfiles = list() - depth = min(4, len(taglist)) # how many levels of indices to create at most - nindices = sum([comb(len(taglist), L) for L in range(depth-1)]) + depth = min(4, len(taglist)) # how many levels of indices to create at most + nindices = sum([comb(len(taglist), L) for L in range(depth - 1)]) log.info("indices down to level %d → %d possible keyword combinations", depth, nindices) - for current_tags in tqdm(chain(*[combinations(taglist, L) for L in range(depth-1)]), unit="idx", - desc="keyword indices", total=nindices): + for current_tags in tqdm( + chain(*[combinations(taglist, L) for L in range(depth - 1)]), + unit="idx", + desc="keyword indices", + total=nindices, + ): current_tags = sorted(current_tags) - indexname = "index%s.rst" % "".join(["_"+x for x in current_tags]) + indexname = "index%s.rst" % "".join(["_" + x for x in current_tags]) hier = make_hierarchy(tags.copy(), *current_tags) if not any(hier.values()): @@ -404,11 +406,15 @@ def CreateTagIndices(tags, outdir="userdocs/"): continue nfiles = len(set.union(*chain([set(subtag) for subtag in hier.values()]))) if nfiles < 2: - log.warning("skipping index for %s, as it links only to %d distinct file(s)", set(hier.keys()), nfiles) + log.warning( + "skipping index for %s, as it links only to %d distinct file(s)", + set(hier.keys()), + nfiles, + ) continue log.debug("generating index for %s...", str(current_tags)) indextext = rst_index(hier, current_tags) - with open(os.path.join(outdir, indexname), 'w') as outfile: + with open(os.path.join(outdir, indexname), "w") as outfile: outfile.write(indextext) indexfiles.append(indexname) log.info("%4d non-empty index files generated", len(indexfiles)) @@ -419,6 +425,7 @@ class JsonWriter(object): """ Helper class to have a unified data output interface. """ + def __init__(self, outdir): self.outdir = outdir log.info("writing JSON files to %s", self.outdir) @@ -428,13 +435,13 @@ def write(self, obj, name): Store the given object with the given name. 
""" outname = os.path.join(self.outdir, name + ".json") - with open(outname, 'w') as outfile: + with open(outname, "w") as outfile: json.dump(obj, outfile) log.info("data saved as " + outname) def getTitles(text): - ''' + """ extract all sections from the given RST file Parameters ---------- @@ -444,24 +451,31 @@ def getTitles(text): ------- list elements are the section title re.match objects - ''' - titlechar = r'\+' - title_re = re.compile(r'^(?P.+)\n(?P<underline>'+titlechar+r'+)$', re.MULTILINE) + """ + titlechar = r"\+" + title_re = re.compile(r"^(?P<title>.+)\n(?P<underline>" + titlechar + r"+)$", re.MULTILINE) titles = [] # extract all titles for match in title_re.finditer(text): - log.debug("MATCH from %s to %s: %s", match.start(), match.end(), pformat(match.groupdict())) - if len(match.group('title')) != len(match.group('underline')): - log.warning("Length of section title '%s' (%d) does not match length of underline (%d)", - match.group('title'), - len(match.group('title')), - len(match.group('underline'))) + log.debug( + "MATCH from %s to %s: %s", + match.start(), + match.end(), + pformat(match.groupdict()), + ) + if len(match.group("title")) != len(match.group("underline")): + log.warning( + "Length of section title '%s' (%d) does not match length of underline (%d)", + match.group("title"), + len(match.group("title")), + len(match.group("underline")), + ) titles.append(match) return titles def getSections(text, titles=None): - ''' + """ Extract sections between titles Parameters ---------- @@ -474,22 +488,22 @@ def getSections(text, titles=None): ------- list tuples of each title re.match object and the text of the following section. - ''' + """ if titles is None: titles = getTitles(text) sections = list() - for title, following in zip(titles, titles[1:]+[None]): + for title, following in zip(titles, titles[1:] + [None]): secstart = title.end() - secend = None # None = end of string + secend = None # None = end of string if following: secend = following.start() - if title.group('title') in sections: - log.warning('Duplicate title in user documentation of %s', filename) - sections.append((title.group('title'), text[secstart:secend].strip())) + if title.group("title") in sections: + log.warning("Duplicate title in user documentation of %s", filename) + sections.append((title.group("title"), text[secstart:secend].strip())) return sections -def ExtractUserDocs(listoffiles, basedir='..', outdir='doc_build/'): +def ExtractUserDocs(listoffiles, basedir="..", outdir="doc_build/"): """ Extract and build all user documentation and build tag indices. Writes extracted information to JSON files in outdir. 
In particular the @@ -515,8 +529,6 @@ def ExtractUserDocs(listoffiles, basedir='..', outdir='doc_build/'): with open(os.path.join(outdir, "toc-tree.json"), "w") as tocfile: json.dump(list(set(toc_list)) + list(set(idx_list)), tocfile) -if __name__ == '__main__': - ExtractUserDocs( - relative_glob("src/*.h", basedir='..'), - outdir="models/" - ) + +if __name__ == "__main__": + ExtractUserDocs(relative_glob("src/*.h", basedir=".."), outdir="models/") diff --git a/doc/guides/differences_nest-gpu_nest.rst b/doc/guides/differences_nest-gpu_nest.rst index 56957cf0e..fec5c172b 100644 --- a/doc/guides/differences_nest-gpu_nest.rst +++ b/doc/guides/differences_nest-gpu_nest.rst @@ -4,7 +4,7 @@ Differences in usage between NEST GPU and NEST Aeif neuron models ------------------ -Aeif neuron models in NEST GPU have both a non-multisynapse and +Aeif neuron models in NEST GPU have both a non-multisynapse and a multisynapse implementation. For the multisynapse implementation, the number of receptor ports must be specified at neuron creation: @@ -20,7 +20,7 @@ not from 1 as in NEST multisynapse models). The non-multisynapse implementation of aeif neuron models has two receptor ports (i.e. excitatory and inhibitory), and thus the connections require to -specify the receptor port through the synapse property ``receptor`` (0 +specify the receptor port through the synapse property ``receptor`` (0 for the excitatory port and 1 for the inhibitory port). Differently from NEST, the connection weights related to the inhibitory port must be positive. @@ -39,7 +39,7 @@ following example: import nestgpu as ngpu - neuron = ngpu.Create("aeif_cond_beta", 3) # create a population of 3 neurons + neuron = ngpu.Create("aeif_cond_beta", 3) # create a population of 3 neurons ngpu.SetStatus(neuron, {"I_e":1000.0}) # set a constant input current diff --git a/doc/guides/how_to_record_spikes.rst b/doc/guides/how_to_record_spikes.rst index 932022b5c..095c61b40 100644 --- a/doc/guides/how_to_record_spikes.rst +++ b/doc/guides/how_to_record_spikes.rst @@ -7,8 +7,8 @@ using the device :doc:`spike_detector <../models/spike_detector>`. An alternative way, which is computationally faster than the ``spike_detector`` device, can be achieved using -the ``RecSpikeTimes`` method. -This method has to be activated before the ``Simulate`` +the ``RecSpikeTimes`` method. +This method has to be activated before the ``Simulate`` function through the command ``ActivateRecSpikeTimes`` in this way: :: @@ -19,8 +19,8 @@ where ``neurons`` is a population of N neurons created using the ``Create`` function, and ``N_max_spike_times`` is an integer which sets the maximum amount of spikes that can be recorded from each neuron of the population (needed to optimize GPU -memory). This method does not enable the recording of -a subset of neurons belonging to a population created in a +memory). This method does not enable the recording of +a subset of neurons belonging to a population created in a single ``Create`` function. After the simulation, the spike times of the recorded population @@ -31,5 +31,3 @@ of the population: :: spike_times = nestgpu.GetRecSpikeTimes(neurons) - - \ No newline at end of file diff --git a/doc/guides/implement_new_neuron_models.rst b/doc/guides/implement_new_neuron_models.rst index 8b750dcc8..479e9c2ae 100644 --- a/doc/guides/implement_new_neuron_models.rst +++ b/doc/guides/implement_new_neuron_models.rst @@ -149,7 +149,7 @@ editor. 
In the file user_m1.h, in the lines: N_SCAL_PARAM }; - + const std::string user_m1_scal_var_name[N_SCAL_VAR] = { "I_syn_ex", "I_syn_in", diff --git a/doc/guides/index.rst b/doc/guides/index.rst index edb497ca5..f3de20afe 100644 --- a/doc/guides/index.rst +++ b/doc/guides/index.rst @@ -10,4 +10,3 @@ Here you can find details on some topics about NEST GPU. differences_nest-gpu_nest implement_new_neuron_models how_to_record_spikes - \ No newline at end of file diff --git a/doc/index.rst b/doc/index.rst index def5c4860..cda79a261 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -14,8 +14,8 @@ neurons. Can be used in Python, in C++ and in C. workflow. With this library it is possible to run relatively fast simulations of -large-scale networks of spiking neurons employing GPUs. -For instance, on a single NVIDIA GeForce RTX 2080 Ti GPU board it is +large-scale networks of spiking neurons employing GPUs. +For instance, on a single NVIDIA GeForce RTX 2080 Ti GPU board it is possible to simulate the activity of 1 million multisynapse AdEx neurons with 1000 synapse per neuron in little more than 70 seconds per second of neural activity using the fifth-order Runge-Kutta method with adaptive @@ -51,4 +51,4 @@ If you use NEST GPU in your work, please cite the publications on our :doc:`publ * :ref:`genindex` * :ref:`modindex` - * :ref:`search`! \ No newline at end of file + * :ref:`search`! diff --git a/doc/installation/cmake_options.rst b/doc/installation/cmake_options.rst index 0c6b3c295..075d41083 100644 --- a/doc/installation/cmake_options.rst +++ b/doc/installation/cmake_options.rst @@ -23,7 +23,7 @@ NEST GPU allows for several configuration options for custom builds: +-----------------------------------------------+----------------------------------------------------------------+ | ``-Dcythonize-pynestpgu=[OFF|ON]`` | Use Cython to cythonize pynestgpukernel.pyx. | - | | If OFF, NEST GPU Python interface has to be build from a | + | | If OFF, NEST GPU Python interface has to be build from a | | | pre-cythonized pynestgpukernel.pyx. [default=ON] | +-----------------------------------------------+----------------------------------------------------------------+ @@ -99,4 +99,3 @@ Generic build configuration +-----------------------------------------------+----------------------------------------------------------------+ | ``-Dwith-version-suffix=[str]`` | Set a user defined version suffix. [default=''] | +-----------------------------------------------+----------------------------------------------------------------+ - diff --git a/doc/installation/index.rst b/doc/installation/index.rst index 7fb99e9f9..1852fb613 100644 --- a/doc/installation/index.rst +++ b/doc/installation/index.rst @@ -5,7 +5,7 @@ Requirements ------------ To build NEST GPU you need `CMake <https://cmake.org/install>`_ (version 3.17 or higher). You also need the `NVIDIA drivers <https://www.nvidia.com/Download/index.aspx?lang=en-us>`_ -for the GPU card installed in your machine and the +for the GPU card installed in your machine and the `NVIDIA CUDA development toolkit <https://developer.nvidia.com/cuda-toolkit>`_. To use the NEST GPU Python interface you need `Python 3 <https://www.python.org/>`_, `Numpy <https://numpy.org/>`_, `Scipy <https://scipy.org/>`_, @@ -65,7 +65,7 @@ you can find a guide to install the NVIDIA drivers for the GPU card on your mach cd nest-gpu-x-build -* Configure NEST GPU. For additional ``cmake`` options see the :doc:`CMake Options <cmake_options>` of this docuentation. +* Configure NEST GPU. 
For additional ``cmake`` options see the :doc:`CMake Options <cmake_options>` of this docuentation. Without the additional options you can type: .. code-block:: sh diff --git a/doc/publications.rst b/doc/publications.rst index 48d08ed66..37de96f0e 100644 --- a/doc/publications.rst +++ b/doc/publications.rst @@ -1,7 +1,7 @@ NEST GPU Publications ===================== -* Golosio B, Villamar J, Tiddia G, Pastorelli E, Stapmanns J, Fanti V, Paolucci PS, Morrison A and Senk J. (2023) Runtime Construction of Large-Scale Spiking Neuronal Network Models on GPU Devices. Applied Sciences; 13(17):9598. doi: https://doi.org/10.3390/app13179598 +* Golosio B, Villamar J, Tiddia G, Pastorelli E, Stapmanns J, Fanti V, Paolucci PS, Morrison A and Senk J. (2023) Runtime Construction of Large-Scale Spiking Neuronal Network Models on GPU Devices. Applied Sciences; 13(17):9598. doi: https://doi.org/10.3390/app13179598 * Tiddia G, Golosio B, Albers J, Senk J, Simula F, Pronold J, Fanti V, Pastorelli E, Paolucci PS and van Albada SJ (2022) Fast Simulation of a Multi-Area Spiking Network Model of Macaque Cortex on an MPI-GPU Cluster. Front. Neuroinform. 16:883333. doi: https://doi.org/10.3389/fninf.2022.883333 diff --git a/doc/requirements.txt b/doc/requirements.txt index 531d7e753..6a9530f25 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,4 +2,4 @@ nbsphinx numpy sphinx_rtd_theme sphinx-tabs -tqdm \ No newline at end of file +tqdm diff --git a/m4/pypath.m4 b/m4/pypath.m4 index 5af6736d8..a4e62d14f 100644 --- a/m4/pypath.m4 +++ b/m4/pypath.m4 @@ -1,4 +1,4 @@ -AC_DEFUN([adl_CHECK_PYTHON], +AC_DEFUN([adl_CHECK_PYTHON], [AM_PATH_PYTHON([2.0]) AC_CACHE_CHECK([for $am_display_PYTHON includes directory], [adl_cv_python_inc], diff --git a/macOS/pythonlib/nestgpu.py b/macOS/pythonlib/nestgpu.py index b8886ed43..1df9f97b5 100644 --- a/macOS/pythonlib/nestgpu.py +++ b/macOS/pythonlib/nestgpu.py @@ -1,26 +1,29 @@ """ Python interface for NESTGPU""" -import sys, platform -import ctypes, ctypes.util +import ctypes +import ctypes.util import os +import platform +import sys import unicodedata -print('-----------------------------------------------------------------') -print('NESTGPU') -print('A GPU-MPI library for simulation of large-scale networks') -print(' of spiking neurons') -print('Homepage: https://github.com/golosio/NESTGPU') -print('Author: B. Golosio, University of Cagliari') -print('email: golosio@unica.it') -print('-----------------------------------------------------------------') +print("-----------------------------------------------------------------") +print("NESTGPU") +print("A GPU-MPI library for simulation of large-scale networks") +print(" of spiking neurons") +print("Homepage: https://github.com/golosio/NESTGPU") +print("Author: B. 
Golosio, University of Cagliari") +print("email: golosio@unica.it") +print("-----------------------------------------------------------------") -lib_path="/usr/local/lib/libnestgpu.so" -_nestgpu=ctypes.CDLL(lib_path) +lib_path = "/usr/local/lib/libnestgpu.so" +_nestgpu = ctypes.CDLL(lib_path) c_float_p = ctypes.POINTER(ctypes.c_float) c_int_p = ctypes.POINTER(ctypes.c_int) c_char_p = ctypes.POINTER(ctypes.c_char) c_void_p = ctypes.c_void_p + class NodeSeq(object): def __init__(self, i0, n=1): if i0 == None: @@ -30,24 +33,27 @@ def __init__(self, i0, n=1): self.n = n def Subseq(self, first, last): - if first<0 | last<first: + if first < 0 | last < first: raise ValueError("Sequence subset range error") - if last>=self.n: + if last >= self.n: raise ValueError("Sequence subset out of range") return NodeSeq(self.i0 + first, last - first + 1) + def __getitem__(self, i): - if type(i)==slice: + if type(i) == slice: if i.step != None: raise ValueError("Subsequence cannot have a step") return self.Subseq(i.start, i.stop) - - if i<0: + + if i < 0: raise ValueError("Sequence index cannot be negative") - if i>=self.n: + if i >= self.n: raise ValueError("Sequence index out of range") return self.i0 + i + def ToList(self): return list(range(self.i0, self.i0 + self.n)) + def __len__(self): return self.n @@ -58,57 +64,78 @@ def __init__(self, i_source, i_group, i_conn): self.i_group = i_group self.i_conn = i_conn + class SynGroup(object): def __init__(self, i_syn_group): self.i_syn_group = i_syn_group + def to_byte_str(s): - if type(s)==str: - return s.encode('ascii') - elif type(s)==bytes: + if type(s) == str: + return s.encode("ascii") + elif type(s) == bytes: return s else: raise ValueError("Variable cannot be converted to string") + def to_def_str(s): - if (sys.version_info >= (3, 0)): + if sys.version_info >= (3, 0): return s.decode("utf-8") else: return s + def waitenter(val): - if (sys.version_info >= (3, 0)): + if sys.version_info >= (3, 0): return input(val) else: return raw_input(val) - -conn_rule_name = ("one_to_one", "all_to_all", "fixed_total_number", - "fixed_indegree", "fixed_outdegree") - + + +conn_rule_name = ( + "one_to_one", + "all_to_all", + "fixed_total_number", + "fixed_indegree", + "fixed_outdegree", +) + NESTGPU_GetErrorMessage = _nestgpu.NESTGPU_GetErrorMessage NESTGPU_GetErrorMessage.restype = ctypes.POINTER(ctypes.c_char) + + def GetErrorMessage(): "Get error message from NESTGPU exception" message = ctypes.cast(NESTGPU_GetErrorMessage(), ctypes.c_char_p).value return message - + + NESTGPU_GetErrorCode = _nestgpu.NESTGPU_GetErrorCode NESTGPU_GetErrorCode.restype = ctypes.c_ubyte + + def GetErrorCode(): "Get error code from NESTGPU exception" return NESTGPU_GetErrorCode() - + + NESTGPU_SetOnException = _nestgpu.NESTGPU_SetOnException NESTGPU_SetOnException.argtypes = (ctypes.c_int,) + + def SetOnException(on_exception): "Define whether handle exceptions (1) or exit (0) in case of errors" return NESTGPU_SetOnException(ctypes.c_int(on_exception)) + SetOnException(1) NESTGPU_SetRandomSeed = _nestgpu.NESTGPU_SetRandomSeed NESTGPU_SetRandomSeed.argtypes = (ctypes.c_ulonglong,) NESTGPU_SetRandomSeed.restype = ctypes.c_int + + def SetRandomSeed(seed): "Set seed for random number generation" ret = NESTGPU_SetRandomSeed(ctypes.c_ulonglong(seed)) @@ -120,6 +147,8 @@ def SetRandomSeed(seed): NESTGPU_SetTimeResolution = _nestgpu.NESTGPU_SetTimeResolution NESTGPU_SetTimeResolution.argtypes = (ctypes.c_float,) NESTGPU_SetTimeResolution.restype = ctypes.c_int + + def SetTimeResolution(time_res): 
"Set time resolution in ms" ret = NESTGPU_SetTimeResolution(ctypes.c_float(time_res)) @@ -127,8 +156,11 @@ def SetTimeResolution(time_res): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetTimeResolution = _nestgpu.NESTGPU_GetTimeResolution NESTGPU_GetTimeResolution.restype = ctypes.c_float + + def GetTimeResolution(): "Get time resolution in ms" ret = NESTGPU_GetTimeResolution() @@ -140,6 +172,8 @@ def GetTimeResolution(): NESTGPU_SetMaxSpikeBufferSize = _nestgpu.NESTGPU_SetMaxSpikeBufferSize NESTGPU_SetMaxSpikeBufferSize.argtypes = (ctypes.c_int,) NESTGPU_SetMaxSpikeBufferSize.restype = ctypes.c_int + + def SetMaxSpikeBufferSize(max_size): "Set maximum size of spike buffer per node" ret = NESTGPU_SetMaxSpikeBufferSize(ctypes.c_int(max_size)) @@ -150,6 +184,8 @@ def SetMaxSpikeBufferSize(max_size): NESTGPU_GetMaxSpikeBufferSize = _nestgpu.NESTGPU_GetMaxSpikeBufferSize NESTGPU_GetMaxSpikeBufferSize.restype = ctypes.c_int + + def GetMaxSpikeBufferSize(): "Get maximum size of spike buffer per node" ret = NESTGPU_GetMaxSpikeBufferSize() @@ -161,6 +197,8 @@ def GetMaxSpikeBufferSize(): NESTGPU_SetSimTime = _nestgpu.NESTGPU_SetSimTime NESTGPU_SetSimTime.argtypes = (ctypes.c_float,) NESTGPU_SetSimTime.restype = ctypes.c_int + + def SetSimTime(sim_time): "Set neural activity simulated time in ms" ret = NESTGPU_SetSimTime(ctypes.c_float(sim_time)) @@ -172,18 +210,20 @@ def SetSimTime(sim_time): NESTGPU_Create = _nestgpu.NESTGPU_Create NESTGPU_Create.argtypes = (c_char_p, ctypes.c_int, ctypes.c_int) NESTGPU_Create.restype = ctypes.c_int + + def Create(model_name, n_node=1, n_ports=1, status_dict=None): "Create a neuron group" - if (type(status_dict)==dict): + if type(status_dict) == dict: node_group = Create(model_name, n_node, n_ports) SetStatus(node_group, status_dict) return node_group - - elif status_dict!=None: + + elif status_dict != None: raise ValueError("Wrong argument in Create") - - c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name)+1) - i_node =NESTGPU_Create(c_model_name, ctypes.c_int(n_node), ctypes.c_int(n_ports)) + + c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name) + 1) + i_node = NESTGPU_Create(c_model_name, ctypes.c_int(n_node), ctypes.c_int(n_ports)) ret = NodeSeq(i_node, n_node) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -193,9 +233,11 @@ def Create(model_name, n_node=1, n_ports=1, status_dict=None): NESTGPU_CreatePoissonGenerator = _nestgpu.NESTGPU_CreatePoissonGenerator NESTGPU_CreatePoissonGenerator.argtypes = (ctypes.c_int, ctypes.c_float) NESTGPU_CreatePoissonGenerator.restype = ctypes.c_int + + def CreatePoissonGenerator(n_node, rate): "Create a poisson-distributed spike generator" - i_node = NESTGPU_CreatePoissonGenerator(ctypes.c_int(n_node), ctypes.c_float(rate)) + i_node = NESTGPU_CreatePoissonGenerator(ctypes.c_int(n_node), ctypes.c_float(rate)) ret = NodeSeq(i_node, n_node) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -203,24 +245,34 @@ def CreatePoissonGenerator(n_node, rate): NESTGPU_CreateRecord = _nestgpu.NESTGPU_CreateRecord -NESTGPU_CreateRecord.argtypes = (c_char_p, ctypes.POINTER(c_char_p), c_int_p, c_int_p, ctypes.c_int) +NESTGPU_CreateRecord.argtypes = ( + c_char_p, + ctypes.POINTER(c_char_p), + c_int_p, + c_int_p, + ctypes.c_int, +) NESTGPU_CreateRecord.restype = ctypes.c_int + + def CreateRecord(file_name, var_name_list, i_node_list, i_port_list): "Create a record of neuron variables" n_node = len(i_node_list) - c_file_name = 
ctypes.create_string_buffer(to_byte_str(file_name), len(file_name)+1) + c_file_name = ctypes.create_string_buffer(to_byte_str(file_name), len(file_name) + 1) array_int_type = ctypes.c_int * n_node array_char_pt_type = c_char_p * n_node - c_var_name_list=[] + c_var_name_list = [] for i in range(n_node): - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name_list[i]), len(var_name_list[i])+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name_list[i]), len(var_name_list[i]) + 1) c_var_name_list.append(c_var_name) - ret = NESTGPU_CreateRecord(c_file_name, - array_char_pt_type(*c_var_name_list), - array_int_type(*i_node_list), - array_int_type(*i_port_list), - ctypes.c_int(n_node)) + ret = NESTGPU_CreateRecord( + c_file_name, + array_char_pt_type(*c_var_name_list), + array_int_type(*i_node_list), + array_int_type(*i_port_list), + ctypes.c_int(n_node), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -229,6 +281,8 @@ def CreateRecord(file_name, var_name_list, i_node_list, i_port_list): NESTGPU_GetRecordDataRows = _nestgpu.NESTGPU_GetRecordDataRows NESTGPU_GetRecordDataRows.argtypes = (ctypes.c_int,) NESTGPU_GetRecordDataRows.restype = ctypes.c_int + + def GetRecordDataRows(i_record): "Get record n. of rows" ret = NESTGPU_GetRecordDataRows(ctypes.c_int(i_record)) @@ -240,6 +294,8 @@ def GetRecordDataRows(i_record): NESTGPU_GetRecordDataColumns = _nestgpu.NESTGPU_GetRecordDataColumns NESTGPU_GetRecordDataColumns.argtypes = (ctypes.c_int,) NESTGPU_GetRecordDataColumns.restype = ctypes.c_int + + def GetRecordDataColumns(i_record): "Get record n. of columns" ret = NESTGPU_GetRecordDataColumns(ctypes.c_int(i_record)) @@ -251,6 +307,8 @@ def GetRecordDataColumns(i_record): NESTGPU_GetRecordData = _nestgpu.NESTGPU_GetRecordData NESTGPU_GetRecordData.argtypes = (ctypes.c_int,) NESTGPU_GetRecordData.restype = ctypes.POINTER(c_float_p) + + def GetRecordData(i_record): "Get record data" data_arr_pt = NESTGPU_GetRecordData(ctypes.c_int(i_record)) @@ -261,85 +319,111 @@ def GetRecordData(i_record): row_list = [] for ic in range(nc): row_list.append(data_arr_pt[ir][ic]) - + data_list.append(row_list) - - ret = data_list + + ret = data_list if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronScalParam = _nestgpu.NESTGPU_SetNeuronScalParam -NESTGPU_SetNeuronScalParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronScalParam.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronScalParam.restype = ctypes.c_int + + def SetNeuronScalParam(i_node, n_node, param_name, val): "Set neuron scalar parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = NESTGPU_SetNeuronScalParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name, - ctypes.c_float(val)) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SetNeuronScalParam(ctypes.c_int(i_node), ctypes.c_int(n_node), c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronArrayParam = _nestgpu.NESTGPU_SetNeuronArrayParam -NESTGPU_SetNeuronArrayParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, c_float_p, ctypes.c_int) +NESTGPU_SetNeuronArrayParam.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronArrayParam.restype = ctypes.c_int + + def SetNeuronArrayParam(i_node, 
n_node, param_name, param_list): "Set neuron array parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) array_size = len(param_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronArrayParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name, - array_float_type(*param_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronArrayParam( + ctypes.c_int(i_node), + ctypes.c_int(n_node), + c_param_name, + array_float_type(*param_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtScalParam = _nestgpu.NESTGPU_SetNeuronPtScalParam -NESTGPU_SetNeuronPtScalParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronPtScalParam.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronPtScalParam.restype = ctypes.c_int + + def SetNeuronPtScalParam(nodes, param_name, val): "Set neuron list scalar parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - ret = NESTGPU_SetNeuronPtScalParam(node_pt, - ctypes.c_int(n_node), c_param_name, - ctypes.c_float(val)) + ret = NESTGPU_SetNeuronPtScalParam(node_pt, ctypes.c_int(n_node), c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtArrayParam = _nestgpu.NESTGPU_SetNeuronPtArrayParam -NESTGPU_SetNeuronPtArrayParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, c_float_p, - ctypes.c_int) +NESTGPU_SetNeuronPtArrayParam.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronPtArrayParam.restype = ctypes.c_int + + def SetNeuronPtArrayParam(nodes, param_name, param_list): "Set neuron list array parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - + array_size = len(param_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronPtArrayParam(node_pt, - ctypes.c_int(n_node), - c_param_name, - array_float_type(*param_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronPtArrayParam( + node_pt, + ctypes.c_int(n_node), + c_param_name, + array_float_type(*param_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -348,11 +432,12 @@ def SetNeuronPtArrayParam(nodes, param_name, param_list): NESTGPU_IsNeuronScalParam = _nestgpu.NESTGPU_IsNeuronScalParam NESTGPU_IsNeuronScalParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronScalParam.restype = ctypes.c_int + + def IsNeuronScalParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsNeuronScalParam(ctypes.c_int(i_node), c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = 
NESTGPU_IsNeuronScalParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -361,97 +446,128 @@ def IsNeuronScalParam(i_node, param_name): NESTGPU_IsNeuronPortParam = _nestgpu.NESTGPU_IsNeuronPortParam NESTGPU_IsNeuronPortParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronPortParam.restype = ctypes.c_int + + def IsNeuronPortParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_IsNeuronPortParam(ctypes.c_int(i_node), c_param_name)!= 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronPortParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_IsNeuronArrayParam = _nestgpu.NESTGPU_IsNeuronArrayParam NESTGPU_IsNeuronArrayParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronArrayParam.restype = ctypes.c_int + + def IsNeuronArrayParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_IsNeuronArrayParam(ctypes.c_int(i_node), c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronArrayParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetNeuronScalVar = _nestgpu.NESTGPU_SetNeuronScalVar -NESTGPU_SetNeuronScalVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronScalVar.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronScalVar.restype = ctypes.c_int + + def SetNeuronScalVar(i_node, n_node, var_name, val): "Set neuron scalar variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = NESTGPU_SetNeuronScalVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name, - ctypes.c_float(val)) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_SetNeuronScalVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronArrayVar = _nestgpu.NESTGPU_SetNeuronArrayVar -NESTGPU_SetNeuronArrayVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, c_float_p, ctypes.c_int) +NESTGPU_SetNeuronArrayVar.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronArrayVar.restype = ctypes.c_int + + def SetNeuronArrayVar(i_node, n_node, var_name, var_list): "Set neuron array variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) array_size = len(var_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronArrayVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name, - array_float_type(*var_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronArrayVar( + ctypes.c_int(i_node), + ctypes.c_int(n_node), + c_var_name, + array_float_type(*var_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtScalVar = _nestgpu.NESTGPU_SetNeuronPtScalVar 
-NESTGPU_SetNeuronPtScalVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronPtScalVar.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronPtScalVar.restype = ctypes.c_int + + def SetNeuronPtScalVar(nodes, var_name, val): "Set neuron list scalar variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - ret = NESTGPU_SetNeuronPtScalVar(node_pt, - ctypes.c_int(n_node), c_var_name, - ctypes.c_float(val)) + ret = NESTGPU_SetNeuronPtScalVar(node_pt, ctypes.c_int(n_node), c_var_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtArrayVar = _nestgpu.NESTGPU_SetNeuronPtArrayVar -NESTGPU_SetNeuronPtArrayVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, c_float_p, - ctypes.c_int) +NESTGPU_SetNeuronPtArrayVar.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronPtArrayVar.restype = ctypes.c_int + + def SetNeuronPtArrayVar(nodes, var_name, var_list): "Set neuron list array variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) array_size = len(var_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronPtArrayVar(node_pt, - ctypes.c_int(n_node), - c_var_name, - array_float_type(*var_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronPtArrayVar( + node_pt, + ctypes.c_int(n_node), + c_var_name, + array_float_type(*var_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -460,11 +576,12 @@ def SetNeuronPtArrayVar(nodes, var_name, var_list): NESTGPU_IsNeuronScalVar = _nestgpu.NESTGPU_IsNeuronScalVar NESTGPU_IsNeuronScalVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronScalVar.restype = ctypes.c_int + + def IsNeuronScalVar(i_node, var_name): "Check name of neuron scalar variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - ret = (NESTGPU_IsNeuronScalVar(ctypes.c_int(i_node), c_var_name)!=0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronScalVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -473,21 +590,26 @@ def IsNeuronScalVar(i_node, var_name): NESTGPU_IsNeuronPortVar = _nestgpu.NESTGPU_IsNeuronPortVar NESTGPU_IsNeuronPortVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronPortVar.restype = ctypes.c_int + + def IsNeuronPortVar(i_node, var_name): "Check name of neuron scalar variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = (NESTGPU_IsNeuronPortVar(ctypes.c_int(i_node), c_var_name)!= 0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronPortVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_IsNeuronArrayVar = _nestgpu.NESTGPU_IsNeuronArrayVar 
NESTGPU_IsNeuronArrayVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronArrayVar.restype = ctypes.c_int + + def IsNeuronArrayVar(i_node, var_name): "Check name of neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = (NESTGPU_IsNeuronArrayVar(ctypes.c_int(i_node), c_var_name)!=0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronArrayVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -496,76 +618,78 @@ def IsNeuronArrayVar(i_node, var_name): NESTGPU_GetNeuronParamSize = _nestgpu.NESTGPU_GetNeuronParamSize NESTGPU_GetNeuronParamSize.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetNeuronParamSize.restype = ctypes.c_int + + def GetNeuronParamSize(i_node, param_name): "Get neuron parameter array size" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = NESTGPU_GetNeuronParamSize(ctypes.c_int(i_node), c_param_name) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_GetNeuronParamSize(ctypes.c_int(i_node), c_param_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronParam = _nestgpu.NESTGPU_GetNeuronParam -NESTGPU_GetNeuronParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronParam.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p) NESTGPU_GetNeuronParam.restype = c_float_p + + def GetNeuronParam(i_node, n_node, param_name): "Get neuron parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - data_pt = NESTGPU_GetNeuronParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + data_pt = NESTGPU_GetNeuronParam(ctypes.c_int(i_node), ctypes.c_int(n_node), c_param_name) array_size = GetNeuronParamSize(i_node, param_name) data_list = [] for i_node in range(n_node): row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronPtParam = _nestgpu.NESTGPU_GetNeuronPtParam -NESTGPU_GetNeuronPtParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronPtParam.argtypes = (ctypes.c_void_p, ctypes.c_int, c_char_p) NESTGPU_GetNeuronPtParam.restype = c_float_p + + def GetNeuronPtParam(nodes, param_name): "Get neuron list scalar parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - data_pt = NESTGPU_GetNeuronPtParam(node_pt, - ctypes.c_int(n_node), c_param_name) + data_pt = NESTGPU_GetNeuronPtParam(node_pt, ctypes.c_int(n_node), c_param_name) array_size = GetNeuronParamSize(nodes[0], param_name) data_list = [] for i_node in range(n_node): row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + 
NESTGPU_GetArrayParam = _nestgpu.NESTGPU_GetArrayParam NESTGPU_GetArrayParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetArrayParam.restype = c_float_p + + def GetArrayParam(i_node, n_node, param_name): "Get neuron array parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) data_list = [] for j_node in range(n_node): i_node1 = i_node + j_node @@ -575,17 +699,17 @@ def GetArrayParam(i_node, n_node, param_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetNeuronListArrayParam(node_list, param_name): "Get neuron array parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) data_list = [] for i_node in node_list: row_list = [] @@ -594,20 +718,23 @@ def GetNeuronListArrayParam(node_list, param_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret -#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx NESTGPU_GetNeuronVarSize = _nestgpu.NESTGPU_GetNeuronVarSize NESTGPU_GetNeuronVarSize.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetNeuronVarSize.restype = ctypes.c_int + + def GetNeuronVarSize(i_node, var_name): "Get neuron variable array size" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) ret = NESTGPU_GetNeuronVarSize(ctypes.c_int(i_node), c_var_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -615,15 +742,14 @@ def GetNeuronVarSize(i_node, var_name): NESTGPU_GetNeuronVar = _nestgpu.NESTGPU_GetNeuronVar -NESTGPU_GetNeuronVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronVar.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p) NESTGPU_GetNeuronVar.restype = c_float_p + + def GetNeuronVar(i_node, n_node, var_name): "Get neuron variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - data_pt = NESTGPU_GetNeuronVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + data_pt = NESTGPU_GetNeuronVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name) array_size = GetNeuronVarSize(i_node, var_name) @@ -631,51 +757,52 @@ def GetNeuronVar(i_node, n_node, var_name): for i_node in range(n_node): row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronPtVar = _nestgpu.NESTGPU_GetNeuronPtVar -NESTGPU_GetNeuronPtVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronPtVar.argtypes = (ctypes.c_void_p, ctypes.c_int, c_char_p) NESTGPU_GetNeuronPtVar.restype = c_float_p + + def GetNeuronPtVar(nodes, var_name): "Get neuron list scalar variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - 
len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - data_pt = NESTGPU_GetNeuronPtVar(node_pt, - ctypes.c_int(n_node), c_var_name) + data_pt = NESTGPU_GetNeuronPtVar(node_pt, ctypes.c_int(n_node), c_var_name) array_size = GetNeuronVarSize(nodes[0], var_name) data_list = [] for i_node in range(n_node): row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayVar = _nestgpu.NESTGPU_GetArrayVar NESTGPU_GetArrayVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetArrayVar.restype = c_float_p + + def GetArrayVar(i_node, n_node, var_name): "Get neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) data_list = [] for j_node in range(n_node): i_node1 = i_node + j_node @@ -685,9 +812,9 @@ def GetArrayVar(i_node, n_node, var_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -695,8 +822,7 @@ def GetArrayVar(i_node, n_node, var_name): def GetNeuronListArrayVar(node_list, var_name): "Get neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) data_list = [] for i_node in node_list: row_list = [] @@ -705,40 +831,36 @@ def GetNeuronListArrayVar(node_list, var_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetNeuronStatus(nodes, var_name): "Get neuron group scalar or array variable or parameter" - if (type(nodes)!=list) & (type(nodes)!=tuple) & (type(nodes)!=NodeSeq): + if (type(nodes) != list) & (type(nodes) != tuple) & (type(nodes) != NodeSeq): raise ValueError("Unknown node type") - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - if type(nodes)==NodeSeq: - if (IsNeuronScalParam(nodes.i0, var_name) | - IsNeuronPortParam(nodes.i0, var_name)): + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + if type(nodes) == NodeSeq: + if IsNeuronScalParam(nodes.i0, var_name) | IsNeuronPortParam(nodes.i0, var_name): ret = GetNeuronParam(nodes.i0, nodes.n, var_name) elif IsNeuronArrayParam(nodes.i0, var_name): ret = GetArrayParam(nodes.i0, nodes.n, var_name) - elif (IsNeuronScalVar(nodes.i0, var_name) | - IsNeuronPortVar(nodes.i0, var_name)): + elif IsNeuronScalVar(nodes.i0, var_name) | IsNeuronPortVar(nodes.i0, var_name): ret = GetNeuronVar(nodes.i0, nodes.n, var_name) elif IsNeuronArrayVar(nodes.i0, var_name): ret = GetArrayVar(nodes.i0, nodes.n, var_name) else: raise ValueError("Unknown neuron variable or parameter") else: - if (IsNeuronScalParam(nodes[0], var_name) | - IsNeuronPortParam(nodes[0], var_name)): + if IsNeuronScalParam(nodes[0], var_name) | IsNeuronPortParam(nodes[0], var_name): ret = GetNeuronPtParam(nodes, var_name) elif IsNeuronArrayParam(nodes[0], var_name): ret = GetNeuronListArrayParam(nodes, var_name) - elif 
(IsNeuronScalVar(nodes[0], var_name) | - IsNeuronPortVar(nodes[0], var_name)): + elif IsNeuronScalVar(nodes[0], var_name) | IsNeuronPortVar(nodes[0], var_name): ret = GetNeuronPtVar(nodes, var_name) elif IsNeuronArrayVar(nodes[0], var_name): ret = GetNeuronListArrayVar(nodes, var_name) @@ -750,6 +872,8 @@ def GetNeuronStatus(nodes, var_name): NESTGPU_GetNScalVar = _nestgpu.NESTGPU_GetNScalVar NESTGPU_GetNScalVar.argtypes = (ctypes.c_int,) NESTGPU_GetNScalVar.restype = ctypes.c_int + + def GetNScalVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNScalVar(ctypes.c_int(i_node)) @@ -757,14 +881,16 @@ def GetNScalVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetScalVarNames = _nestgpu.NESTGPU_GetScalVarNames NESTGPU_GetScalVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetScalVarNames.restype = ctypes.POINTER(c_char_p) + + def GetScalVarNames(i_node): "Get list of scalar variable names" n_var = GetNScalVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetScalVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetScalVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] @@ -774,9 +900,12 @@ def GetScalVarNames(i_node): raise ValueError(GetErrorMessage()) return var_name_list + NESTGPU_GetNPortVar = _nestgpu.NESTGPU_GetNPortVar NESTGPU_GetNPortVar.argtypes = (ctypes.c_int,) NESTGPU_GetNPortVar.restype = ctypes.c_int + + def GetNPortVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNPortVar(ctypes.c_int(i_node)) @@ -784,20 +913,22 @@ def GetNPortVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetPortVarNames = _nestgpu.NESTGPU_GetPortVarNames NESTGPU_GetPortVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetPortVarNames.restype = ctypes.POINTER(c_char_p) + + def GetPortVarNames(i_node): "Get list of scalar variable names" n_var = GetNPortVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetPortVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetPortVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] var_name = ctypes.cast(var_name_p, ctypes.c_char_p).value var_name_list.append(to_def_str(var_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return var_name_list @@ -806,6 +937,8 @@ def GetPortVarNames(i_node): NESTGPU_GetNScalParam = _nestgpu.NESTGPU_GetNScalParam NESTGPU_GetNScalParam.argtypes = (ctypes.c_int,) NESTGPU_GetNScalParam.restype = ctypes.c_int + + def GetNScalParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNScalParam(ctypes.c_int(i_node)) @@ -813,27 +946,32 @@ def GetNScalParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetScalParamNames = _nestgpu.NESTGPU_GetScalParamNames NESTGPU_GetScalParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetScalParamNames.restype = ctypes.POINTER(c_char_p) + + def GetScalParamNames(i_node): "Get list of scalar parameter names" n_param = GetNScalParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetScalParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetScalParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value 
param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list + NESTGPU_GetNPortParam = _nestgpu.NESTGPU_GetNPortParam NESTGPU_GetNPortParam.argtypes = (ctypes.c_int,) NESTGPU_GetNPortParam.restype = ctypes.c_int + + def GetNPortParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNPortParam(ctypes.c_int(i_node)) @@ -841,20 +979,22 @@ def GetNPortParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetPortParamNames = _nestgpu.NESTGPU_GetPortParamNames NESTGPU_GetPortParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetPortParamNames.restype = ctypes.POINTER(c_char_p) + + def GetPortParamNames(i_node): "Get list of scalar parameter names" n_param = GetNPortParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetPortParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetPortParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -863,6 +1003,8 @@ def GetPortParamNames(i_node): NESTGPU_GetNArrayParam = _nestgpu.NESTGPU_GetNArrayParam NESTGPU_GetNArrayParam.argtypes = (ctypes.c_int,) NESTGPU_GetNArrayParam.restype = ctypes.c_int + + def GetNArrayParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNArrayParam(ctypes.c_int(i_node)) @@ -870,20 +1012,22 @@ def GetNArrayParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayParamNames = _nestgpu.NESTGPU_GetArrayParamNames NESTGPU_GetArrayParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetArrayParamNames.restype = ctypes.POINTER(c_char_p) + + def GetArrayParamNames(i_node): "Get list of scalar parameter names" n_param = GetNArrayParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetArrayParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetArrayParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -892,6 +1036,8 @@ def GetArrayParamNames(i_node): NESTGPU_GetNArrayVar = _nestgpu.NESTGPU_GetNArrayVar NESTGPU_GetNArrayVar.argtypes = (ctypes.c_int,) NESTGPU_GetNArrayVar.restype = ctypes.c_int + + def GetNArrayVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNArrayVar(ctypes.c_int(i_node)) @@ -899,56 +1045,51 @@ def GetNArrayVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayVarNames = _nestgpu.NESTGPU_GetArrayVarNames NESTGPU_GetArrayVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetArrayVarNames.restype = ctypes.POINTER(c_char_p) + + def GetArrayVarNames(i_node): "Get list of scalar variable names" n_var = GetNArrayVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetArrayVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetArrayVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] var_name = ctypes.cast(var_name_p, ctypes.c_char_p).value 
var_name_list.append(to_def_str(var_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return var_name_list - - def SetNeuronStatus(nodes, var_name, val): "Set neuron group scalar or array variable or parameter" - if (type(nodes)!=list) & (type(nodes)!=tuple) & (type(nodes)!=NodeSeq): + if (type(nodes) != list) & (type(nodes) != tuple) & (type(nodes) != NodeSeq): raise ValueError("Unknown node type") - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - if type(nodes)==NodeSeq: + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + if type(nodes) == NodeSeq: if IsNeuronScalParam(nodes.i0, var_name): SetNeuronScalParam(nodes.i0, nodes.n, var_name, val) - elif (IsNeuronPortParam(nodes.i0, var_name) | - IsNeuronArrayParam(nodes.i0, var_name)): + elif IsNeuronPortParam(nodes.i0, var_name) | IsNeuronArrayParam(nodes.i0, var_name): SetNeuronArrayParam(nodes.i0, nodes.n, var_name, val) elif IsNeuronScalVar(nodes.i0, var_name): SetNeuronScalVar(nodes.i0, nodes.n, var_name, val) - elif (IsNeuronPortVar(nodes.i0, var_name) | - IsNeuronArrayVar(nodes.i0, var_name)): + elif IsNeuronPortVar(nodes.i0, var_name) | IsNeuronArrayVar(nodes.i0, var_name): SetNeuronArrayVar(nodes.i0, nodes.n, var_name, val) else: raise ValueError("Unknown neuron variable or parameter") - else: + else: if IsNeuronScalParam(nodes[0], var_name): SetNeuronPtScalParam(nodes, var_name, val) - elif (IsNeuronPortParam(nodes[0], var_name) | - IsNeuronArrayParam(nodes[0], var_name)): + elif IsNeuronPortParam(nodes[0], var_name) | IsNeuronArrayParam(nodes[0], var_name): SetNeuronPtArrayParam(nodes, var_name, val) elif IsNeuronScalVar(nodes[0], var_name): SetNeuronPtScalVar(nodes, var_name, val) - elif (IsNeuronPortVar(nodes[0], var_name) | - IsNeuronArrayVar(nodes[0], var_name)): + elif IsNeuronPortVar(nodes[0], var_name) | IsNeuronArrayVar(nodes[0], var_name): SetNeuronPtArrayVar(nodes, var_name, val) else: raise ValueError("Unknown neuron variable or parameter") @@ -956,6 +1097,8 @@ def SetNeuronStatus(nodes, var_name, val): NESTGPU_Calibrate = _nestgpu.NESTGPU_Calibrate NESTGPU_Calibrate.restype = ctypes.c_int + + def Calibrate(): "Calibrate simulation" ret = NESTGPU_Calibrate() @@ -966,6 +1109,8 @@ def Calibrate(): NESTGPU_Simulate = _nestgpu.NESTGPU_Simulate NESTGPU_Simulate.restype = ctypes.c_int + + def Simulate(sim_time=1000.0): "Simulate neural activity" SetSimTime(sim_time) @@ -978,17 +1123,19 @@ def Simulate(sim_time=1000.0): NESTGPU_ConnectMpiInit = _nestgpu.NESTGPU_ConnectMpiInit NESTGPU_ConnectMpiInit.argtypes = (ctypes.c_int, ctypes.POINTER(c_char_p)) NESTGPU_ConnectMpiInit.restype = ctypes.c_int + + def ConnectMpiInit(): "Initialize MPI connections" from mpi4py import MPI - argc=len(sys.argv) + + argc = len(sys.argv) array_char_pt_type = c_char_p * argc - c_var_name_list=[] + c_var_name_list = [] for i in range(argc): c_arg = ctypes.create_string_buffer(to_byte_str(sys.argv[i]), 100) - c_var_name_list.append(c_arg) - ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), - array_char_pt_type(*c_var_name_list)) + c_var_name_list.append(c_arg) + ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), array_char_pt_type(*c_var_name_list)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -996,6 +1143,8 @@ def ConnectMpiInit(): NESTGPU_MpiId = _nestgpu.NESTGPU_MpiId NESTGPU_MpiId.restype = ctypes.c_int + + def MpiId(): "Get MPI Id" ret = NESTGPU_MpiId() @@ -1006,6 +1155,8 @@ def MpiId(): NESTGPU_MpiNp = _nestgpu.NESTGPU_MpiNp 
NESTGPU_MpiNp.restype = ctypes.c_int + + def MpiNp(): "Get MPI Np" ret = NESTGPU_MpiNp() @@ -1016,6 +1167,8 @@ def MpiNp(): NESTGPU_ProcMaster = _nestgpu.NESTGPU_ProcMaster NESTGPU_ProcMaster.restype = ctypes.c_int + + def ProcMaster(): "Get MPI ProcMaster" ret = NESTGPU_ProcMaster() @@ -1026,6 +1179,8 @@ def ProcMaster(): NESTGPU_MpiFinalize = _nestgpu.NESTGPU_MpiFinalize NESTGPU_MpiFinalize.restype = ctypes.c_int + + def MpiFinalize(): "Finalize MPI" ret = NESTGPU_MpiFinalize() @@ -1037,6 +1192,8 @@ def MpiFinalize(): NESTGPU_RandomInt = _nestgpu.NESTGPU_RandomInt NESTGPU_RandomInt.argtypes = (ctypes.c_size_t,) NESTGPU_RandomInt.restype = ctypes.POINTER(ctypes.c_uint) + + def RandomInt(n): "Generate n random integers in CUDA memory" ret = NESTGPU_RandomInt(ctypes.c_size_t(n)) @@ -1048,6 +1205,8 @@ def RandomInt(n): NESTGPU_RandomUniform = _nestgpu.NESTGPU_RandomUniform NESTGPU_RandomUniform.argtypes = (ctypes.c_size_t,) NESTGPU_RandomUniform.restype = c_float_p + + def RandomUniform(n): "Generate n random floats with uniform distribution in (0,1) in CUDA memory" ret = NESTGPU_RandomUniform(ctypes.c_size_t(n)) @@ -1059,26 +1218,36 @@ def RandomUniform(n): NESTGPU_RandomNormal = _nestgpu.NESTGPU_RandomNormal NESTGPU_RandomNormal.argtypes = (ctypes.c_size_t, ctypes.c_float, ctypes.c_float) NESTGPU_RandomNormal.restype = c_float_p + + def RandomNormal(n, mean, stddev): "Generate n random floats with normal distribution in CUDA memory" - ret = NESTGPU_RandomNormal(ctypes.c_size_t(n), ctypes.c_float(mean), - ctypes.c_float(stddev)) + ret = NESTGPU_RandomNormal(ctypes.c_size_t(n), ctypes.c_float(mean), ctypes.c_float(stddev)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_RandomNormalClipped = _nestgpu.NESTGPU_RandomNormalClipped -NESTGPU_RandomNormalClipped.argtypes = (ctypes.c_size_t, ctypes.c_float, ctypes.c_float, ctypes.c_float, - ctypes.c_float) +NESTGPU_RandomNormalClipped.argtypes = ( + ctypes.c_size_t, + ctypes.c_float, + ctypes.c_float, + ctypes.c_float, + ctypes.c_float, +) NESTGPU_RandomNormalClipped.restype = c_float_p + + def RandomNormalClipped(n, mean, stddev, vmin, vmax): "Generate n random floats with normal clipped distribution in CUDA memory" - ret = NESTGPU_RandomNormalClipped(ctypes.c_size_t(n), - ctypes.c_float(mean), - ctypes.c_float(stddev), - ctypes.c_float(vmin), - ctypes.c_float(vmax)) + ret = NESTGPU_RandomNormalClipped( + ctypes.c_size_t(n), + ctypes.c_float(mean), + ctypes.c_float(stddev), + ctypes.c_float(vmin), + ctypes.c_float(vmax), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1087,31 +1256,44 @@ def RandomNormalClipped(n, mean, stddev, vmin, vmax): NESTGPU_ConnectMpiInit = _nestgpu.NESTGPU_ConnectMpiInit NESTGPU_ConnectMpiInit.argtypes = (ctypes.c_int, ctypes.POINTER(c_char_p)) NESTGPU_ConnectMpiInit.restype = ctypes.c_int + + def ConnectMpiInit(): "Initialize MPI connections" from mpi4py import MPI - argc=len(sys.argv) + + argc = len(sys.argv) array_char_pt_type = c_char_p * argc - c_var_name_list=[] + c_var_name_list = [] for i in range(argc): c_arg = ctypes.create_string_buffer(to_byte_str(sys.argv[i]), 100) - c_var_name_list.append(c_arg) - ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), - array_char_pt_type(*c_var_name_list)) + c_var_name_list.append(c_arg) + ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), array_char_pt_type(*c_var_name_list)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_Connect = _nestgpu.NESTGPU_Connect -NESTGPU_Connect.argtypes = 
(ctypes.c_int, ctypes.c_int, ctypes.c_ubyte, ctypes.c_float, ctypes.c_float) +NESTGPU_Connect.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_ubyte, + ctypes.c_float, + ctypes.c_float, +) NESTGPU_Connect.restype = ctypes.c_int + + def SingleConnect(i_source_node, i_target_node, i_port, weight, delay): "Connect two nodes" - ret = NESTGPU_Connect(ctypes.c_int(i_source_node), - ctypes.c_int(i_target_node), - ctypes.c_ubyte(i_port), ctypes.c_float(weight), - ctypes.c_float(delay)) + ret = NESTGPU_Connect( + ctypes.c_int(i_source_node), + ctypes.c_int(i_target_node), + ctypes.c_ubyte(i_port), + ctypes.c_float(weight), + ctypes.c_float(delay), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1119,6 +1301,8 @@ def SingleConnect(i_source_node, i_target_node, i_port, weight, delay): NESTGPU_ConnSpecInit = _nestgpu.NESTGPU_ConnSpecInit NESTGPU_ConnSpecInit.restype = ctypes.c_int + + def ConnSpecInit(): "Initialize connection rules specification" ret = NESTGPU_ConnSpecInit() @@ -1130,9 +1314,11 @@ def ConnSpecInit(): NESTGPU_SetConnSpecParam = _nestgpu.NESTGPU_SetConnSpecParam NESTGPU_SetConnSpecParam.argtypes = (c_char_p, ctypes.c_int) NESTGPU_SetConnSpecParam.restype = ctypes.c_int + + def SetConnSpecParam(param_name, val): "Set connection parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetConnSpecParam(c_param_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -1142,10 +1328,12 @@ def SetConnSpecParam(param_name, val): NESTGPU_ConnSpecIsParam = _nestgpu.NESTGPU_ConnSpecIsParam NESTGPU_ConnSpecIsParam.argtypes = (c_char_p,) NESTGPU_ConnSpecIsParam.restype = ctypes.c_int + + def ConnSpecIsParam(param_name): "Check name of connection parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_ConnSpecIsParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_ConnSpecIsParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1153,6 +1341,8 @@ def ConnSpecIsParam(param_name): NESTGPU_SynSpecInit = _nestgpu.NESTGPU_SynSpecInit NESTGPU_SynSpecInit.restype = ctypes.c_int + + def SynSpecInit(): "Initializa synapse specification" ret = NESTGPU_SynSpecInit() @@ -1160,37 +1350,46 @@ def SynSpecInit(): raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecIntParam = _nestgpu.NESTGPU_SetSynSpecIntParam NESTGPU_SetSynSpecIntParam.argtypes = (c_char_p, ctypes.c_int) NESTGPU_SetSynSpecIntParam.restype = ctypes.c_int + + def SetSynSpecIntParam(param_name, val): "Set synapse int parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetSynSpecIntParam(c_param_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecFloatParam = _nestgpu.NESTGPU_SetSynSpecFloatParam NESTGPU_SetSynSpecFloatParam.argtypes = (c_char_p, ctypes.c_float) NESTGPU_SetSynSpecFloatParam.restype = ctypes.c_int + + def SetSynSpecFloatParam(param_name, val): "Set synapse float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = 
ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetSynSpecFloatParam(c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecFloatPtParam = _nestgpu.NESTGPU_SetSynSpecFloatPtParam NESTGPU_SetSynSpecFloatPtParam.argtypes = (c_char_p, ctypes.c_void_p) NESTGPU_SetSynSpecFloatPtParam.restype = ctypes.c_int + + def SetSynSpecFloatPtParam(param_name, arr): "Set synapse pointer to float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - if (type(arr) is list) | (type(arr) is tuple): - arr = (ctypes.c_float * len(arr))(*arr) - arr_pt = ctypes.cast(arr, ctypes.c_void_p) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + if (type(arr) is list) | (type(arr) is tuple): + arr = (ctypes.c_float * len(arr))(*arr) + arr_pt = ctypes.cast(arr, ctypes.c_void_p) ret = NESTGPU_SetSynSpecFloatPtParam(c_param_name, arr_pt) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -1200,10 +1399,12 @@ def SetSynSpecFloatPtParam(param_name, arr): NESTGPU_SynSpecIsIntParam = _nestgpu.NESTGPU_SynSpecIsIntParam NESTGPU_SynSpecIsIntParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsIntParam.restype = ctypes.c_int + + def SynSpecIsIntParam(param_name): "Check name of synapse int parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsIntParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsIntParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1212,10 +1413,12 @@ def SynSpecIsIntParam(param_name): NESTGPU_SynSpecIsFloatParam = _nestgpu.NESTGPU_SynSpecIsFloatParam NESTGPU_SynSpecIsFloatParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsFloatParam.restype = ctypes.c_int + + def SynSpecIsFloatParam(param_name): "Check name of synapse float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsFloatParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsFloatParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1224,10 +1427,12 @@ def SynSpecIsFloatParam(param_name): NESTGPU_SynSpecIsFloatPtParam = _nestgpu.NESTGPU_SynSpecIsFloatPtParam NESTGPU_SynSpecIsFloatPtParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsFloatPtParam.restype = ctypes.c_int + + def SynSpecIsFloatPtParam(param_name): "Check name of synapse pointer to float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsFloatPtParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsFloatPtParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1240,52 +1445,52 @@ def DictToArray(param_dict, array_size): high = 1.0e35 mu = None sigma = None - + for param_name in param_dict: pval = param_dict[param_name] - if param_name=="array": + if param_name == "array": dist_name = "array" arr = pval - elif param_name=="distribution": + elif param_name == "distribution": dist_name = pval - elif param_name=="low": + elif param_name == "low": low = pval - elif param_name=="high": + elif 
param_name == "high": high = pval - elif param_name=="mu": + elif param_name == "mu": mu = pval - elif param_name=="sigma": + elif param_name == "sigma": sigma = pval else: raise ValueError("Unknown parameter name in dictionary") - if dist_name=="array": + if dist_name == "array": if (type(arr) is list) | (type(arr) is tuple): if len(arr) != array_size: raise ValueError("Wrong array size.") arr = (ctypes.c_float * len(arr))(*arr) - #array_pt = ctypes.cast(arr, ctypes.c_void_p) - #return array_pt + # array_pt = ctypes.cast(arr, ctypes.c_void_p) + # return array_pt return arr - elif dist_name=="normal": + elif dist_name == "normal": return RandomNormal(array_size, mu, sigma) - elif dist_name=="normal_clipped": + elif dist_name == "normal_clipped": return RandomNormalClipped(array_size, mu, sigma, low, high) else: raise ValueError("Unknown distribution") def RuleArraySize(conn_dict, source, target): - if conn_dict["rule"]=="one_to_one": + if conn_dict["rule"] == "one_to_one": array_size = len(source) - elif conn_dict["rule"]=="all_to_all": - array_size = len(source)*len(target) - elif conn_dict["rule"]=="fixed_total_number": + elif conn_dict["rule"] == "all_to_all": + array_size = len(source) * len(target) + elif conn_dict["rule"] == "fixed_total_number": array_size = conn_dict["total_num"] - elif conn_dict["rule"]=="fixed_indegree": - array_size = len(target)*conn_dict["indegree"] - elif conn_dict["rule"]=="fixed_outdegree": - array_size = len(source)*conn_dict["outdegree"] + elif conn_dict["rule"] == "fixed_indegree": + array_size = len(target) * conn_dict["indegree"] + elif conn_dict["rule"] == "fixed_outdegree": + array_size = len(source) * conn_dict["outdegree"] else: raise ValueError("Unknown number of connections for this rule") return array_size @@ -1293,49 +1498,63 @@ def RuleArraySize(conn_dict, source, target): def SetSynParamFromArray(param_name, par_dict, array_size): arr_param_name = param_name + "_array" - if (not SynSpecIsFloatPtParam(arr_param_name)): - raise ValueError("Synapse parameter cannot be set by" - " arrays or distributions") + if not SynSpecIsFloatPtParam(arr_param_name): + raise ValueError("Synapse parameter cannot be set by" " arrays or distributions") arr = DictToArray(par_dict, array_size) array_pt = ctypes.cast(arr, ctypes.c_void_p) SetSynSpecFloatPtParam(arr_param_name, array_pt) - - NESTGPU_ConnectSeqSeq = _nestgpu.NESTGPU_ConnectSeqSeq -NESTGPU_ConnectSeqSeq.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int, - ctypes.c_int) +NESTGPU_ConnectSeqSeq.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_ConnectSeqSeq.restype = ctypes.c_int NESTGPU_ConnectSeqGroup = _nestgpu.NESTGPU_ConnectSeqGroup -NESTGPU_ConnectSeqGroup.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_ConnectSeqGroup.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_ConnectSeqGroup.restype = ctypes.c_int NESTGPU_ConnectGroupSeq = _nestgpu.NESTGPU_ConnectGroupSeq -NESTGPU_ConnectGroupSeq.argtypes = (ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_ConnectGroupSeq.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_ConnectGroupSeq.restype = ctypes.c_int NESTGPU_ConnectGroupGroup = _nestgpu.NESTGPU_ConnectGroupGroup -NESTGPU_ConnectGroupGroup.argtypes = (ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_ConnectGroupGroup.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) 
NESTGPU_ConnectGroupGroup.restype = ctypes.c_int -def Connect(source, target, conn_dict, syn_dict): + +def Connect(source, target, conn_dict, syn_dict): "Connect two node groups" - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") - + ConnSpecInit() SynSpecInit() for param_name in conn_dict: - if param_name=="rule": + if param_name == "rule": for i_rule in range(len(conn_rule_name)): - if conn_dict[param_name]==conn_rule_name[i_rule]: + if conn_dict[param_name] == conn_rule_name[i_rule]: break if i_rule < len(conn_rule_name): SetConnSpecParam(param_name, i_rule) @@ -1345,18 +1564,18 @@ def Connect(source, target, conn_dict, syn_dict): SetConnSpecParam(param_name, conn_dict[param_name]) else: raise ValueError("Unknown connection parameter") - + array_size = RuleArraySize(conn_dict, source, target) - + for param_name in syn_dict: if SynSpecIsIntParam(param_name): val = syn_dict[param_name] - if ((param_name=="synapse_group") & (type(val)==SynGroup)): + if (param_name == "synapse_group") & (type(val) == SynGroup): val = val.i_syn_group SetSynSpecIntParam(param_name, val) elif SynSpecIsFloatParam(param_name): fpar = syn_dict[param_name] - if (type(fpar)==dict): + if type(fpar) == dict: SetSynParamFromArray(param_name, fpar, array_size) else: SetSynSpecFloatParam(param_name, fpar) @@ -1365,123 +1584,150 @@ def Connect(source, target, conn_dict, syn_dict): SetSynSpecFloatPtParam(param_name, syn_dict[param_name]) else: raise ValueError("Unknown synapse parameter") - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : + if (type(source) == NodeSeq) & (type(target) == NodeSeq): ret = NESTGPU_ConnectSeqSeq(source.i0, source.n, target.i0, target.n) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - ret = NESTGPU_ConnectSeqGroup(source.i0, source.n, target_arr_pt, - len(target)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - ret = NESTGPU_ConnectGroupSeq(source_arr_pt, len(source), - target.i0, target.n) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + ret = NESTGPU_ConnectSeqGroup(source.i0, source.n, target_arr_pt, len(target)) + elif (type(source) != NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_ConnectGroupSeq(source_arr_pt, len(source), target.i0, target.n) else: - ret = NESTGPU_ConnectGroupGroup(source_arr_pt, len(source), - target_arr_pt, len(target)) + ret = NESTGPU_ConnectGroupGroup(source_arr_pt, len(source), target_arr_pt, len(target)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_RemoteConnectSeqSeq = _nestgpu.NESTGPU_RemoteConnectSeqSeq -NESTGPU_RemoteConnectSeqSeq.argtypes = (ctypes.c_int, ctypes.c_int, - 
ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_RemoteConnectSeqSeq.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_RemoteConnectSeqSeq.restype = ctypes.c_int NESTGPU_RemoteConnectSeqGroup = _nestgpu.NESTGPU_RemoteConnectSeqGroup -NESTGPU_RemoteConnectSeqGroup.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_RemoteConnectSeqGroup.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_RemoteConnectSeqGroup.restype = ctypes.c_int NESTGPU_RemoteConnectGroupSeq = _nestgpu.NESTGPU_RemoteConnectGroupSeq -NESTGPU_RemoteConnectGroupSeq.argtypes = (ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_RemoteConnectGroupSeq.argtypes = ( + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_RemoteConnectGroupSeq.restype = ctypes.c_int NESTGPU_RemoteConnectGroupGroup = _nestgpu.NESTGPU_RemoteConnectGroupGroup -NESTGPU_RemoteConnectGroupGroup.argtypes = (ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_RemoteConnectGroupGroup.argtypes = ( + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_RemoteConnectGroupGroup.restype = ctypes.c_int -def RemoteConnect(i_source_host, source, i_target_host, target, - conn_dict, syn_dict): + +def RemoteConnect(i_source_host, source, i_target_host, target, conn_dict, syn_dict): "Connect two node groups of differen mpi hosts" - if (type(i_source_host)!=int) | (type(i_target_host)!=int): + if (type(i_source_host) != int) | (type(i_target_host) != int): raise ValueError("Error in host index") - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") - + ConnSpecInit() SynSpecInit() for param_name in conn_dict: - if param_name=="rule": + if param_name == "rule": for i_rule in range(len(conn_rule_name)): - if conn_dict[param_name]==conn_rule_name[i_rule]: + if conn_dict[param_name] == conn_rule_name[i_rule]: break if i_rule < len(conn_rule_name): SetConnSpecParam(param_name, i_rule) else: raise ValueError("Unknown connection rule") - + elif ConnSpecIsParam(param_name): SetConnSpecParam(param_name, conn_dict[param_name]) else: raise ValueError("Unknown connection parameter") - - array_size = RuleArraySize(conn_dict, source, target) - + + array_size = RuleArraySize(conn_dict, source, target) + for param_name in syn_dict: if SynSpecIsIntParam(param_name): SetSynSpecIntParam(param_name, syn_dict[param_name]) elif SynSpecIsFloatParam(param_name): fpar = syn_dict[param_name] - if (type(fpar)==dict): + if type(fpar) == dict: SetSynParamFromArray(param_name, fpar, array_size) else: SetSynSpecFloatParam(param_name, fpar) - + elif SynSpecIsFloatPtParam(param_name): SetSynSpecFloatPtParam(param_name, syn_dict[param_name]) else: raise ValueError("Unknown synapse parameter") - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : - ret = NESTGPU_RemoteConnectSeqSeq(i_source_host, source.i0, source.n, - i_target_host, 
target.i0, target.n) + if (type(source) == NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_RemoteConnectSeqSeq(i_source_host, source.i0, source.n, i_target_host, target.i0, target.n) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - ret = NESTGPU_RemoteConnectSeqGroup(i_source_host, source.i0, - source.n, i_target_host, - target_arr_pt, len(target)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - ret = NESTGPU_RemoteConnectGroupSeq(i_source_host, source_arr_pt, - len(source), - i_target_host, target.i0, - target.n) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + ret = NESTGPU_RemoteConnectSeqGroup( + i_source_host, + source.i0, + source.n, + i_target_host, + target_arr_pt, + len(target), + ) + elif (type(source) != NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_RemoteConnectGroupSeq( + i_source_host, + source_arr_pt, + len(source), + i_target_host, + target.i0, + target.n, + ) else: - ret = NESTGPU_RemoteConnectGroupGroup(i_source_host, - source_arr_pt, - len(source), - i_target_host, - target_arr_pt, - len(target)) + ret = NESTGPU_RemoteConnectGroupGroup( + i_source_host, + source_arr_pt, + len(source), + i_target_host, + target_arr_pt, + len(target), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1489,19 +1735,19 @@ def RemoteConnect(i_source_host, source, i_target_host, target, def SetStatus(gen_object, params, val=None): "Set neuron or synapse group parameters or variables using dictionaries" - if type(gen_object)==SynGroup: + if type(gen_object) == SynGroup: return SetSynGroupStatus(gen_object, params, val) - nodes = gen_object + nodes = gen_object if val != None: - SetNeuronStatus(nodes, params, val) - elif type(params)==dict: + SetNeuronStatus(nodes, params, val) + elif type(params) == dict: for param_name in params: SetNeuronStatus(nodes, param_name, params[param_name]) - elif (type(params)==list) | (type(params) is tuple): + elif (type(params) == list) | (type(params) is tuple): if len(params) != len(nodes): raise ValueError("List should have the same size as nodes") for param_dict in params: - if type(param_dict)!=dict: + if type(param_dict) != dict: raise ValueError("Type of list elements should be dict") for param_name in param_dict: SetNeuronStatus(nodes, param_name, param_dict[param_name]) @@ -1509,232 +1755,287 @@ def SetStatus(gen_object, params, val=None): raise ValueError("Wrong argument in SetStatus") if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) - -#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx NESTGPU_GetSeqSeqConnections = _nestgpu.NESTGPU_GetSeqSeqConnections -NESTGPU_GetSeqSeqConnections.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetSeqSeqConnections.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetSeqSeqConnections.restype = c_int_p 
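# --- Editor's note: usage sketch, not part of the patch. It shows the conn_dict / syn_dict
# conventions consumed by Connect() and RemoteConnect() above. The model name, "weight"
# and "delay" are the customary names but are assumptions here; the names actually
# accepted by SynSpecIsFloatParam()/SynSpecIsFloatPtParam() depend on the build.
import nestgpu as ngpu

src = ngpu.Create("aeif_cond_beta", 100)  # hypothetical model name and sizes
tgt = ngpu.Create("aeif_cond_beta", 100)
conn_dict = {"rule": "fixed_indegree", "indegree": 10}  # rules as listed in RuleArraySize()
syn_dict = {
    # a dict value is expanded via DictToArray()/SetSynParamFromArray():
    "weight": {"distribution": "normal_clipped", "mu": 0.1, "sigma": 0.02, "low": 0.0, "high": 1.0},
    "delay": 1.0,  # a plain float goes through SetSynSpecFloatParam()
}
ngpu.Connect(src, tgt, conn_dict, syn_dict)
# The same dictionaries are reused when connecting node groups on different MPI ranks:
# ngpu.RemoteConnect(0, src, 1, tgt, conn_dict, syn_dict)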
NESTGPU_GetSeqGroupConnections = _nestgpu.NESTGPU_GetSeqGroupConnections -NESTGPU_GetSeqGroupConnections.argtypes = (ctypes.c_int, ctypes.c_int, - c_void_p, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetSeqGroupConnections.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_void_p, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetSeqGroupConnections.restype = c_int_p NESTGPU_GetGroupSeqConnections = _nestgpu.NESTGPU_GetGroupSeqConnections -NESTGPU_GetGroupSeqConnections.argtypes = (c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetGroupSeqConnections.argtypes = ( + c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetGroupSeqConnections.restype = c_int_p NESTGPU_GetGroupGroupConnections = _nestgpu.NESTGPU_GetGroupGroupConnections -NESTGPU_GetGroupGroupConnections.argtypes = (c_void_p, ctypes.c_int, - c_void_p, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetGroupGroupConnections.argtypes = ( + c_void_p, + ctypes.c_int, + c_void_p, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetGroupGroupConnections.restype = c_int_p -def GetConnections(source=None, target=None, syn_group=-1): + +def GetConnections(source=None, target=None, syn_group=-1): "Get connections between two node groups" - if source==None: + if source == None: source = NodeSeq(None) - if target==None: + if target == None: target = NodeSeq(None) - if (type(source)==int): + if type(source) == int: source = [source] - if (type(target)==int): + if type(target) == int: target = [target] - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") - + n_conn = ctypes.c_int(0) - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : - conn_arr = NESTGPU_GetSeqSeqConnections(source.i0, source.n, - target.i0, target.n, - syn_group, - ctypes.byref(n_conn)) + if (type(source) == NodeSeq) & (type(target) == NodeSeq): + conn_arr = NESTGPU_GetSeqSeqConnections( + source.i0, source.n, target.i0, target.n, syn_group, ctypes.byref(n_conn) + ) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - conn_arr = NESTGPU_GetSeqGroupConnections(source.i0, source.n, - target_arr_pt, - len(target), - syn_group, - ctypes.byref(n_conn)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - conn_arr = NESTGPU_GetGroupSeqConnections(source_arr_pt, - len(source), - target.i0, target.n, - syn_group, - ctypes.byref(n_conn)) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + conn_arr = NESTGPU_GetSeqGroupConnections( + source.i0, + source.n, + target_arr_pt, + len(target), + syn_group, + ctypes.byref(n_conn), + ) + elif (type(source) != NodeSeq) 
& (type(target) == NodeSeq): + conn_arr = NESTGPU_GetGroupSeqConnections( + source_arr_pt, + len(source), + target.i0, + target.n, + syn_group, + ctypes.byref(n_conn), + ) else: - conn_arr = NESTGPU_GetGroupGroupConnections(source_arr_pt, - len(source), - target_arr_pt, - len(target), - syn_group, - ctypes.byref(n_conn)) + conn_arr = NESTGPU_GetGroupGroupConnections( + source_arr_pt, + len(source), + target_arr_pt, + len(target), + syn_group, + ctypes.byref(n_conn), + ) conn_list = [] for i_conn in range(n_conn.value): - conn_id = ConnectionId(conn_arr[i_conn*3], conn_arr[i_conn*3 + 1], - conn_arr[i_conn*3 + 2]) + conn_id = ConnectionId(conn_arr[i_conn * 3], conn_arr[i_conn * 3 + 1], conn_arr[i_conn * 3 + 2]) conn_list.append(conn_id) - + ret = conn_list if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetConnectionStatus = _nestgpu.NESTGPU_GetConnectionStatus -NESTGPU_GetConnectionStatus.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p, - c_char_p, c_char_p, - c_float_p, c_float_p) +NESTGPU_GetConnectionStatus.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, + c_char_p, + c_char_p, + c_float_p, + c_float_p, +) NESTGPU_GetConnectionStatus.restype = ctypes.c_int + def GetConnectionStatus(conn_id): i_source = conn_id.i_source i_group = conn_id.i_group i_conn = conn_id.i_conn - + i_target = ctypes.c_int(0) i_port = ctypes.c_char() i_syn = ctypes.c_char() delay = ctypes.c_float(0.0) weight = ctypes.c_float(0.0) - NESTGPU_GetConnectionStatus(i_source, i_group, i_conn, - ctypes.byref(i_target), - ctypes.byref(i_port), - ctypes.byref(i_syn), - ctypes.byref(delay), - ctypes.byref(weight)) + NESTGPU_GetConnectionStatus( + i_source, + i_group, + i_conn, + ctypes.byref(i_target), + ctypes.byref(i_port), + ctypes.byref(i_syn), + ctypes.byref(delay), + ctypes.byref(weight), + ) i_target = i_target.value i_port = ord(i_port.value) i_syn = ord(i_syn.value) delay = delay.value weight = weight.value - conn_status_dict = {"source":i_source, "target":i_target, "port":i_port, - "syn":i_syn, "delay":delay, "weight":weight} + conn_status_dict = { + "source": i_source, + "target": i_target, + "port": i_port, + "syn": i_syn, + "delay": delay, + "weight": weight, + } return conn_status_dict def GetStatus(gen_object, var_key=None): "Get neuron group, connection or synapse group status" - if type(gen_object)==SynGroup: + if type(gen_object) == SynGroup: return GetSynGroupStatus(gen_object, var_key) - - if type(gen_object)==NodeSeq: + + if type(gen_object) == NodeSeq: gen_object = gen_object.ToList() - if (type(gen_object)==list) | (type(gen_object)==tuple): + if (type(gen_object) == list) | (type(gen_object) == tuple): status_list = [] for gen_elem in gen_object: elem_dict = GetStatus(gen_elem, var_key) status_list.append(elem_dict) return status_list - if (type(var_key)==list) | (type(var_key)==tuple): + if (type(var_key) == list) | (type(var_key) == tuple): status_list = [] for var_elem in var_key: var_value = GetStatus(gen_object, var_elem) status_list.append(var_value) return status_list - elif (var_key==None): - if (type(gen_object)==ConnectionId): + elif var_key == None: + if type(gen_object) == ConnectionId: status_dict = GetConnectionStatus(gen_object) - elif (type(gen_object)==int): + elif type(gen_object) == int: i_node = gen_object status_dict = {} - name_list = GetScalVarNames(i_node) + GetScalParamNames(i_node) \ - + GetPortVarNames(i_node) + GetPortParamNames(i_node) \ - + GetArrayVarNames(i_node) + GetArrayParamNames(i_node) + 
name_list = ( + GetScalVarNames(i_node) + + GetScalParamNames(i_node) + + GetPortVarNames(i_node) + + GetPortParamNames(i_node) + + GetArrayVarNames(i_node) + + GetArrayParamNames(i_node) + ) for var_name in name_list: val = GetStatus(i_node, var_name) status_dict[var_name] = val else: raise ValueError("Unknown object type in GetStatus") return status_dict - elif (type(var_key)==str) | (type(var_key)==bytes): - if (type(gen_object)==ConnectionId): + elif (type(var_key) == str) | (type(var_key) == bytes): + if type(gen_object) == ConnectionId: status_dict = GetConnectionStatus(gen_object) return status_dict[var_key] - elif (type(gen_object)==int): + elif type(gen_object) == int: i_node = gen_object return GetNeuronStatus([i_node], var_key)[0] else: raise ValueError("Unknown object type in GetStatus") - + else: raise ValueError("Unknown key type in GetStatus", type(var_key)) - NESTGPU_CreateSynGroup = _nestgpu.NESTGPU_CreateSynGroup NESTGPU_CreateSynGroup.argtypes = (c_char_p,) NESTGPU_CreateSynGroup.restype = ctypes.c_int + + def CreateSynGroup(model_name, status_dict=None): "Create a synapse group" - if (type(status_dict)==dict): + if type(status_dict) == dict: syn_group = CreateSynGroup(model_name) SetStatus(syn_group, status_dict) return syn_group - elif status_dict!=None: + elif status_dict != None: raise ValueError("Wrong argument in CreateSynGroup") - c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), \ - len(model_name)+1) - i_syn_group = NESTGPU_CreateSynGroup(c_model_name) + c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name) + 1) + i_syn_group = NESTGPU_CreateSynGroup(c_model_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return SynGroup(i_syn_group) - + NESTGPU_GetSynGroupNParam = _nestgpu.NESTGPU_GetSynGroupNParam NESTGPU_GetSynGroupNParam.argtypes = (ctypes.c_int,) NESTGPU_GetSynGroupNParam.restype = ctypes.c_int + + def GetSynGroupNParam(syn_group): "Get number of synapse parameters for a given synapse group" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupNParam") i_syn_group = syn_group.i_syn_group - + ret = NESTGPU_GetSynGroupNParam(ctypes.c_int(i_syn_group)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetSynGroupParamNames = _nestgpu.NESTGPU_GetSynGroupParamNames NESTGPU_GetSynGroupParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetSynGroupParamNames.restype = ctypes.POINTER(c_char_p) + + def GetSynGroupParamNames(syn_group): "Get list of synapse group parameter names" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupParamNames") i_syn_group = syn_group.i_syn_group n_param = GetSynGroupNParam(syn_group) - param_name_pp = ctypes.cast(NESTGPU_GetSynGroupParamNames( - ctypes.c_int(i_syn_group)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast( + NESTGPU_GetSynGroupParamNames(ctypes.c_int(i_syn_group)), + ctypes.POINTER(c_char_p), + ) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -1743,93 +2044,93 @@ def GetSynGroupParamNames(syn_group): NESTGPU_IsSynGroupParam = _nestgpu.NESTGPU_IsSynGroupParam NESTGPU_IsSynGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsSynGroupParam.restype = ctypes.c_int + 
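# --- Editor's note: usage sketch, not part of the patch, for the synapse-group helpers
# wrapped above. The model name "stdp" and the parameter "tau_plus" are placeholders;
# the names actually available depend on the NEST GPU build.
import nestgpu as ngpu

syn_group = ngpu.CreateSynGroup("stdp", {"tau_plus": 20.0})  # create + SetStatus in one call
print(ngpu.GetSynGroupParamNames(syn_group))  # parameter names defined for this group
print(ngpu.GetStatus(syn_group))              # full parameter dict via GetSynGroupStatus()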
+ def IsSynGroupParam(syn_group, param_name): "Check name of synapse group parameter" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in IsSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsSynGroupParam(ctypes.c_int(i_syn_group), \ - c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsSynGroupParam(ctypes.c_int(i_syn_group), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetSynGroupParam = _nestgpu.NESTGPU_GetSynGroupParam NESTGPU_GetSynGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetSynGroupParam.restype = ctypes.c_float + + def GetSynGroupParam(syn_group, param_name): "Get synapse group parameter value" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + + ret = NESTGPU_GetSynGroupParam(ctypes.c_int(i_syn_group), c_param_name) - ret = NESTGPU_GetSynGroupParam(ctypes.c_int(i_syn_group), - c_param_name) - if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_SetSynGroupParam = _nestgpu.NESTGPU_SetSynGroupParam -NESTGPU_SetSynGroupParam.argtypes = (ctypes.c_int, c_char_p, - ctypes.c_float) +NESTGPU_SetSynGroupParam.argtypes = (ctypes.c_int, c_char_p, ctypes.c_float) NESTGPU_SetSynGroupParam.restype = ctypes.c_int + + def SetSynGroupParam(syn_group, param_name, val): "Set synapse group parameter value" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in SetSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = NESTGPU_SetSynGroupParam(ctypes.c_int(i_syn_group), - c_param_name, ctypes.c_float(val)) - + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SetSynGroupParam(ctypes.c_int(i_syn_group), c_param_name, ctypes.c_float(val)) + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetSynGroupStatus(syn_group, var_key=None): "Get synapse group status" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupStatus") - if (type(var_key)==list) | (type(var_key)==tuple): + if (type(var_key) == list) | (type(var_key) == tuple): status_list = [] for var_elem in var_key: var_value = GetSynGroupStatus(syn_group, var_elem) status_list.append(var_value) return status_list - elif (var_key==None): + elif var_key == None: status_dict = {} name_list = GetSynGroupParamNames(syn_group) for param_name in name_list: val = GetSynGroupStatus(syn_group, param_name) status_dict[param_name] = val return status_dict - elif (type(var_key)==str) | (type(var_key)==bytes): - return GetSynGroupParam(syn_group, var_key) + elif (type(var_key) == str) | (type(var_key) == bytes): + return GetSynGroupParam(syn_group, var_key) else: raise ValueError("Unknown key type in GetSynGroupStatus", type(var_key)) + def SetSynGroupStatus(syn_group, params, val=None): "Set synapse group parameters using dictionaries" - if 
type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in SetSynGroupStatus") - if ((type(params)==dict) & (val==None)): + if (type(params) == dict) & (val == None): for param_name in params: SetSynGroupStatus(syn_group, param_name, params[param_name]) - elif (type(params)==str): - return SetSynGroupParam(syn_group, params, val) + elif type(params) == str: + return SetSynGroupParam(syn_group, params, val) else: - raise ValueError("Wrong argument in SetSynGroupStatus") + raise ValueError("Wrong argument in SetSynGroupStatus") if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) - diff --git a/python/Potjans_2014/README.rst b/python/Potjans_2014/README.rst index 6fff2a8dc..58e7933d5 100644 --- a/python/Potjans_2014/README.rst +++ b/python/Potjans_2014/README.rst @@ -98,7 +98,7 @@ References .. [1] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking network model. Cerebral Cortex. 24(3):785–806. DOI: `10.1093/cercor/bhs358 <https://doi.org/10.1093/cercor/bhs358>`__. - + .. [2] van Albada SJ., Rowley AG., Senk J., Hopkins M., Schmidt M., Stokes AB., Lester DR., Diesmann M. and Furber SB. 2018. Performance Comparison of the Digital Neuromorphic Hardware SpiNNaker and the Neural Network Simulation Software NEST for a Full-Scale Cortical Microcircuit Model. diff --git a/python/Potjans_2014/eval_microcircuit_time.py b/python/Potjans_2014/eval_microcircuit_time.py index 3c2fa89b9..9dbb3956f 100644 --- a/python/Potjans_2014/eval_microcircuit_time.py +++ b/python/Potjans_2014/eval_microcircuit_time.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. -from stimulus_params import stim_dict -from network_params import net_dict -from sim_params_norec import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params_norec import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() @@ -72,22 +75,11 @@ # data evaluation and print calls. 
print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_simulate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to calibrate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) ) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_simulate - time_start) # of Rank {}:\n'.format( .Rank()) + + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to calibrate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) +) diff --git a/python/Potjans_2014/helpers.py b/python/Potjans_2014/helpers.py index 00f1a3f95..ac7f89b01 100644 --- a/python/Potjans_2014/helpers.py +++ b/python/Potjans_2014/helpers.py @@ -29,14 +29,17 @@ """ -from matplotlib.patches import Polygon -import matplotlib.pyplot as plt import os import sys + +import matplotlib.pyplot as plt import numpy as np -if 'DISPLAY' not in os.environ: +from matplotlib.patches import Polygon + +if "DISPLAY" not in os.environ: import matplotlib - matplotlib.use('Agg') + + matplotlib.use("Agg") def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): @@ -60,12 +63,12 @@ def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): Matrix of synapse numbers. """ prod = np.outer(popsize1, popsize2) - num_synapses = np.log(1. - conn_probs) / np.log((prod - 1.) / prod) + num_synapses = np.log(1.0 - conn_probs) / np.log((prod - 1.0) / prod) return num_synapses def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): - """ Computes a factor to convert postsynaptic potentials to currents. + """Computes a factor to convert postsynaptic potentials to currents. The time course of the postsynaptic potential ``v`` is computed as :math: `v(t)=(i*h)(t)` @@ -108,16 +111,16 @@ def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): (in pA). """ - sub = 1. / (tau_syn - tau_m) + sub = 1.0 / (tau_syn - tau_m) pre = tau_m * tau_syn / C_m * sub frac = (tau_m / tau_syn) ** sub - PSC_over_PSP = 1. / (pre * (frac**tau_m - frac**tau_syn)) + PSC_over_PSP = 1.0 / (pre * (frac**tau_m - frac**tau_syn)) return PSC_over_PSP def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): - """ Computes DC input if no Poisson input is provided to the microcircuit. + """Computes DC input if no Poisson input is provided to the microcircuit. Parameters ---------- @@ -140,18 +143,19 @@ def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): def adjust_weights_and_input_to_synapse_scaling( - full_num_neurons, - full_num_synapses, - K_scaling, - mean_PSC_matrix, - PSC_ext, - tau_syn, - full_mean_rates, - DC_amp, - poisson_input, - bg_rate, - K_ext): - """ Adjusts weights and external input to scaling of indegrees. + full_num_neurons, + full_num_synapses, + K_scaling, + mean_PSC_matrix, + PSC_ext, + tau_syn, + full_mean_rates, + DC_amp, + poisson_input, + bg_rate, + K_ext, +): + """Adjusts weights and external input to scaling of indegrees. The recurrent and external weights are adjusted to the scaling of the indegrees. 
Extra DC input is added to compensate for the @@ -196,22 +200,19 @@ def adjust_weights_and_input_to_synapse_scaling( PSC_ext_new = PSC_ext / np.sqrt(K_scaling) # recurrent input of full network - indegree_matrix = \ - full_num_synapses / full_num_neurons[:, np.newaxis] - input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, - axis=1) + indegree_matrix = full_num_synapses / full_num_neurons[:, np.newaxis] + input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, axis=1) - DC_amp_new = DC_amp \ - + 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_rec + DC_amp_new = DC_amp + 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_rec if poisson_input: input_ext = PSC_ext * K_ext * bg_rate - DC_amp_new += 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_ext + DC_amp_new += 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_ext return PSC_matrix_new, PSC_ext_new, DC_amp_new def plot_raster(path, name, begin, end, N_scaling): - """ Creates a spike raster plot of the network activity. + """Creates a spike raster plot of the network activity. Parameters ----------- @@ -232,34 +233,33 @@ def plot_raster(path, name, begin, end, N_scaling): """ fs = 18 # fontsize - ylabels = ['L2/3', 'L4', 'L5', 'L6'] - color_list = np.tile(['#595289', '#af143c'], 4) + ylabels = ["L2/3", "L4", "L5", "L6"] + color_list = np.tile(["#595289", "#af143c"], 4) sd_names, node_ids, data = __load_spike_times(path, name, begin, end) last_node_id = node_ids[-1, -1] mod_node_ids = np.abs(node_ids - last_node_id) + 1 - label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / - 2. for i in np.arange(0, 8, 2)] + label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / 2.0 for i in np.arange(0, 8, 2)] stp = 1 if N_scaling > 0.1: - stp = int(10. * N_scaling) - print(' Only spikes of neurons in steps of {} are shown.'.format(stp)) + stp = int(10.0 * N_scaling) + print(" Only spikes of neurons in steps of {} are shown.".format(stp)) plt.figure(figsize=(8, 6)) for i, n in enumerate(sd_names): - times = data[i]['time_ms'] - neurons = np.abs(data[i]['sender'] - last_node_id) + 1 - plt.plot(times[::stp], neurons[::stp], '.', color=color_list[i]) - plt.xlabel('time [ms]', fontsize=fs) + times = data[i]["time_ms"] + neurons = np.abs(data[i]["sender"] - last_node_id) + 1 + plt.plot(times[::stp], neurons[::stp], ".", color=color_list[i]) + plt.xlabel("time [ms]", fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(label_pos, ylabels, fontsize=fs) - plt.savefig(os.path.join(path, 'raster_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "raster_plot.png"), dpi=300) def firing_rates(path, name, begin, end): - """ Computes mean and standard deviation of firing rates per population. + """Computes mean and standard deviation of firing rates per population. The firing rate of each neuron in each population is computed and stored in a .dat file in the directory of the spike detectors. The mean firing @@ -285,23 +285,21 @@ def firing_rates(path, name, begin, end): all_mean_rates = [] all_std_rates = [] for i, n in enumerate(sd_names): - senders = data[i]['sender'] + senders = data[i]["sender"] # 1 more bin than node ids per population bins = np.arange(node_ids[i, 0], node_ids[i, 1] + 2) spike_count_per_neuron, _ = np.histogram(senders, bins=bins) - rate_per_neuron = spike_count_per_neuron * 1000. 
/ (end - begin) - np.savetxt(os.path.join(path, ('rate' + str(i) + '.dat')), - rate_per_neuron) + rate_per_neuron = spike_count_per_neuron * 1000.0 / (end - begin) + np.savetxt(os.path.join(path, ("rate" + str(i) + ".dat")), rate_per_neuron) # zeros are included all_mean_rates.append(np.mean(rate_per_neuron)) all_std_rates.append(np.std(rate_per_neuron)) - print('Mean rates: {} spikes/s'.format(np.around(all_mean_rates, decimals=3))) - print('Standard deviation of rates: {} spikes/s'.format( - np.around(all_std_rates, decimals=3))) + print("Mean rates: {} spikes/s".format(np.around(all_mean_rates, decimals=3))) + print("Standard deviation of rates: {} spikes/s".format(np.around(all_std_rates, decimals=3))) def boxplot(path, populations): - """ Creates a boxblot of the firing rates of all populations. + """Creates a boxblot of the firing rates of all populations. To create the boxplot, the firing rates of each neuron in each population need to be computed with the function ``firing_rate()``. @@ -319,29 +317,36 @@ def boxplot(path, populations): """ fs = 18 - pop_names = [string.replace('23', '2/3') for string in populations] + pop_names = [string.replace("23", "2/3") for string in populations] label_pos = list(range(len(populations), 0, -1)) - color_list = ['#af143c', '#595289'] - medianprops = dict(linestyle='-', linewidth=2.5, color='black') - meanprops = dict(linestyle='--', linewidth=2.5, color='lightgray') + color_list = ["#af143c", "#595289"] + medianprops = dict(linestyle="-", linewidth=2.5, color="black") + meanprops = dict(linestyle="--", linewidth=2.5, color="lightgray") rates_per_neuron_rev = [] for i in np.arange(len(populations))[::-1]: - rates_per_neuron_rev.append( - np.loadtxt(os.path.join(path, ('rate' + str(i) + '.dat')))) + rates_per_neuron_rev.append(np.loadtxt(os.path.join(path, ("rate" + str(i) + ".dat")))) plt.figure(figsize=(8, 6)) - bp = plt.boxplot(rates_per_neuron_rev, 0, 'rs', 0, medianprops=medianprops, - meanprops=meanprops, meanline=True, showmeans=True) - plt.setp(bp['boxes'], color='black') - plt.setp(bp['whiskers'], color='black') - plt.setp(bp['fliers'], color='red', marker='+') + bp = plt.boxplot( + rates_per_neuron_rev, + 0, + "rs", + 0, + medianprops=medianprops, + meanprops=meanprops, + meanline=True, + showmeans=True, + ) + plt.setp(bp["boxes"], color="black") + plt.setp(bp["whiskers"], color="black") + plt.setp(bp["fliers"], color="red", marker="+") # boxcolors for i in np.arange(len(populations)): boxX = [] boxY = [] - box = bp['boxes'][i] + box = bp["boxes"][i] for j in list(range(5)): boxX.append(box.get_xdata()[j]) boxY.append(box.get_ydata()[j]) @@ -349,14 +354,14 @@ def boxplot(path, populations): k = i % 2 boxPolygon = Polygon(boxCoords, facecolor=color_list[k]) plt.gca().add_patch(boxPolygon) - plt.xlabel('firing rate [spikes/s]', fontsize=fs) + plt.xlabel("firing rate [spikes/s]", fontsize=fs) plt.yticks(label_pos, pop_names, fontsize=fs) plt.xticks(fontsize=fs) - plt.savefig(os.path.join(path, 'box_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "box_plot.png"), dpi=300) def __gather_metadata(path): - """ Reads first and last ids of + """Reads first and last ids of neurons in each population. 
Parameters @@ -371,16 +376,16 @@ def __gather_metadata(path): """ # load node IDs - node_idfile = open(path + 'population_nodeids.dat', 'r') + node_idfile = open(path + "population_nodeids.dat", "r") node_ids = [] for l in node_idfile: node_ids.append(l.split()) - node_ids = np.array(node_ids, dtype='i4') + node_ids = np.array(node_ids, dtype="i4") return node_ids def __load_spike_times(path, name, begin, end): - """ Loads spike times of each spike detector. + """Loads spike times of each spike detector. Parameters ---------- @@ -402,20 +407,19 @@ def __load_spike_times(path, name, begin, end): """ node_ids = __gather_metadata(path) data = {} - dtype = {'names': ('sender', 'time_ms'), # as in header - 'formats': ('i4', 'f8')} - #print(node_ids) + dtype = {"names": ("sender", "time_ms"), "formats": ("i4", "f8")} # as in header + # print(node_ids) sd_names = {} - + for i_pop in range(8): - fn = os.path.join(path, 'spike_times_' + str(i_pop) + '.dat') + fn = os.path.join(path, "spike_times_" + str(i_pop) + ".dat") data_i_raw = np.loadtxt(fn, skiprows=1, dtype=dtype) - data_i_raw = np.sort(data_i_raw, order='time_ms') + data_i_raw = np.sort(data_i_raw, order="time_ms") # begin and end are included if they exist - low = np.searchsorted(data_i_raw['time_ms'], v=begin, side='left') - high = np.searchsorted(data_i_raw['time_ms'], v=end, side='right') + low = np.searchsorted(data_i_raw["time_ms"], v=begin, side="left") + high = np.searchsorted(data_i_raw["time_ms"], v=end, side="right") data[i_pop] = data_i_raw[low:high] - sd_names[i_pop] = 'spike_times_' + str(i_pop) + sd_names[i_pop] = "spike_times_" + str(i_pop) return sd_names, node_ids, data diff --git a/python/Potjans_2014/network.py b/python/Potjans_2014/network.py index 268ce6f2d..1c0b17003 100644 --- a/python/Potjans_2014/network.py +++ b/python/Potjans_2014/network.py @@ -27,13 +27,14 @@ """ import os -import numpy as np -import nestgpu as ngpu + import helpers +import nestgpu as ngpu +import numpy as np class Network: - """ Provides functions to setup NEST GPU, to create and connect all nodes + """Provides functions to setup NEST GPU, to create and connect all nodes of the network, to simulate, and to evaluate the resulting spike data. Instantiating a Network object derives dependent parameters and already @@ -60,17 +61,16 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.Rank = 0 # data directory - self.data_path = sim_dict['data_path'] + self.data_path = sim_dict["data_path"] if self.Rank == 0: if os.path.isdir(self.data_path): - message = ' Directory already existed.' - if self.sim_dict['overwrite_files']: - message += ' Old data will be overwritten.' + message = " Directory already existed." + if self.sim_dict["overwrite_files"]: + message += " Old data will be overwritten." else: os.mkdir(self.data_path) - message = ' Directory has been created.' - print('Data will be written to: {}\n{}\n'.format(self.data_path, - message)) + message = " Directory has been created." + print("Data will be written to: {}\n{}\n".format(self.data_path, message)) # derive parameters based on input dictionaries self.__derive_parameters() @@ -79,23 +79,23 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.__setup_ngpu() def create(self): - """ Creates all network nodes. + """Creates all network nodes. Neuronal populations and recording and stimulating devices are created. 
""" self.__create_neuronal_populations() - if len(self.sim_dict['rec_dev']) > 0: + if len(self.sim_dict["rec_dev"]) > 0: self.__create_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__create_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__create_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__create_dc_stim_input() def connect(self): - """ Connects the network. + """Connects the network. Recurrent connections among neurons of the neuronal populations are established, and recording and stimulating devices are connected. @@ -113,20 +113,20 @@ def connect(self): """ self.__connect_neuronal_populations() - #if len(self.sim_dict['rec_dev']) > 0: + # if len(self.sim_dict['rec_dev']) > 0: # self.__connect_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__connect_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__connect_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__connect_dc_stim_input() - #ngpu.Prepare() - #ngpu.Cleanup() + # ngpu.Prepare() + # ngpu.Cleanup() def simulate(self, t_sim): - """ Simulates the microcircuit. + """Simulates the microcircuit. Parameters ---------- @@ -135,12 +135,12 @@ def simulate(self, t_sim): """ if self.Rank == 0: - print('Simulating {} ms.'.format(t_sim)) + print("Simulating {} ms.".format(t_sim)) ngpu.Simulate(t_sim) def evaluate(self, raster_plot_interval, firing_rates_interval): - """ Displays simulation results. + """Displays simulation results. Creates a spike raster plot. Calculates the firing rate of each population and displays them as a @@ -160,95 +160,105 @@ def evaluate(self, raster_plot_interval, firing_rates_interval): None """ - + spike_times_net = ngpu.GetRecSpikeTimes(self.neurons) popid = 0 for i_pop in range(len(self.pops)): population = self.pops[i_pop] data = [] - spike_times_list = spike_times_net[popid:popid+len(population)] + spike_times_list = spike_times_net[popid : popid + len(population)] popid += len(population) for i_neur in range(len(population)): spike_times = spike_times_list[i_neur] - if (len(spike_times) != 0): + if len(spike_times) != 0: # print("i_pop:", i_pop, " i_neur:", i_neur, " n_spikes:", # len(spike_times)) for t in spike_times: data.append([population[i_neur], t]) arr = np.array(data) - fn = os.path.join(self.data_path, 'spike_times_' + str(i_pop) + - '.dat') - fmt='%d\t%.3f' - np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", - comments='') + fn = os.path.join(self.data_path, "spike_times_" + str(i_pop) + ".dat") + fmt = "%d\t%.3f" + np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", comments="") if self.Rank == 0: - print('Interval to plot spikes: {} ms'.format(raster_plot_interval)) + print("Interval to plot spikes: {} ms".format(raster_plot_interval)) helpers.plot_raster( self.data_path, - 'spike_detector', + "spike_detector", raster_plot_interval[0], raster_plot_interval[1], - self.net_dict['N_scaling']) + self.net_dict["N_scaling"], + ) - print('Interval to compute firing rates: {} ms'.format( - firing_rates_interval)) + print("Interval to compute firing rates: {} ms".format(firing_rates_interval)) helpers.firing_rates( - self.data_path, 'spike_detector', - firing_rates_interval[0], firing_rates_interval[1]) - helpers.boxplot(self.data_path, self.net_dict['populations']) + self.data_path, + 
"spike_detector", + firing_rates_interval[0], + firing_rates_interval[1], + ) + helpers.boxplot(self.data_path, self.net_dict["populations"]) def __derive_parameters(self): """ Derives and adjusts parameters and stores them as class attributes. """ - self.num_pops = len(self.net_dict['populations']) + self.num_pops = len(self.net_dict["populations"]) # total number of synapses between neuronal populations before scaling full_num_synapses = helpers.num_synapses_from_conn_probs( - self.net_dict['conn_probs'], - self.net_dict['full_num_neurons'], - self.net_dict['full_num_neurons']) + self.net_dict["conn_probs"], + self.net_dict["full_num_neurons"], + self.net_dict["full_num_neurons"], + ) # scaled numbers of neurons and synapses - self.num_neurons = np.round((self.net_dict['full_num_neurons'] * - self.net_dict['N_scaling'])).astype(int) - self.num_synapses = np.round((full_num_synapses * - self.net_dict['N_scaling'] * - self.net_dict['K_scaling'])).astype(int) - self.ext_indegrees = np.round((self.net_dict['K_ext'] * - self.net_dict['K_scaling'])).astype(int) + self.num_neurons = np.round((self.net_dict["full_num_neurons"] * self.net_dict["N_scaling"])).astype(int) + self.num_synapses = np.round( + (full_num_synapses * self.net_dict["N_scaling"] * self.net_dict["K_scaling"]) + ).astype(int) + self.ext_indegrees = np.round((self.net_dict["K_ext"] * self.net_dict["K_scaling"])).astype(int) # conversion from PSPs to PSCs PSC_over_PSP = helpers.postsynaptic_potential_to_current( - self.net_dict['neuron_params']['C_m'], - self.net_dict['neuron_params']['tau_m'], - self.net_dict['neuron_params']['tau_syn']) - PSC_matrix_mean = self.net_dict['PSP_matrix_mean'] * PSC_over_PSP - PSC_ext = self.net_dict['PSP_exc_mean'] * PSC_over_PSP + self.net_dict["neuron_params"]["C_m"], + self.net_dict["neuron_params"]["tau_m"], + self.net_dict["neuron_params"]["tau_syn"], + ) + PSC_matrix_mean = self.net_dict["PSP_matrix_mean"] * PSC_over_PSP + PSC_ext = self.net_dict["PSP_exc_mean"] * PSC_over_PSP # DC input compensates for potentially missing Poisson input - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: DC_amp = np.zeros(self.num_pops) else: if self.Rank == 0: - print('DC input compensates for missing Poisson input.\n') + print("DC input compensates for missing Poisson input.\n") DC_amp = helpers.dc_input_compensating_poisson( - self.net_dict['bg_rate'], self.net_dict['K_ext'], - self.net_dict['neuron_params']['tau_syn'], - PSC_ext) + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + self.net_dict["neuron_params"]["tau_syn"], + PSC_ext, + ) # adjust weights and DC amplitude if the indegree is scaled - if self.net_dict['K_scaling'] != 1: - PSC_matrix_mean, PSC_ext, DC_amp = \ - helpers.adjust_weights_and_input_to_synapse_scaling( - self.net_dict['full_num_neurons'], - full_num_synapses, self.net_dict['K_scaling'], - PSC_matrix_mean, PSC_ext, - self.net_dict['neuron_params']['tau_syn'], - self.net_dict['full_mean_rates'], - DC_amp, - self.net_dict['poisson_input'], - self.net_dict['bg_rate'], self.net_dict['K_ext']) + if self.net_dict["K_scaling"] != 1: + ( + PSC_matrix_mean, + PSC_ext, + DC_amp, + ) = helpers.adjust_weights_and_input_to_synapse_scaling( + self.net_dict["full_num_neurons"], + full_num_synapses, + self.net_dict["K_scaling"], + PSC_matrix_mean, + PSC_ext, + self.net_dict["neuron_params"]["tau_syn"], + self.net_dict["full_mean_rates"], + DC_amp, + self.net_dict["poisson_input"], + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + ) # store final parameters as class 
attributes self.weight_matrix_mean = PSC_matrix_mean @@ -256,44 +266,39 @@ def __derive_parameters(self): self.DC_amp = DC_amp # thalamic input - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: num_th_synapses = helpers.num_synapses_from_conn_probs( - self.stim_dict['conn_probs_th'], - self.stim_dict['num_th_neurons'], - self.net_dict['full_num_neurons'])[0] - self.weight_th = self.stim_dict['PSP_th'] * PSC_over_PSP - if self.net_dict['K_scaling'] != 1: - num_th_synapses *= self.net_dict['K_scaling'] - self.weight_th /= np.sqrt(self.net_dict['K_scaling']) + self.stim_dict["conn_probs_th"], + self.stim_dict["num_th_neurons"], + self.net_dict["full_num_neurons"], + )[0] + self.weight_th = self.stim_dict["PSP_th"] * PSC_over_PSP + if self.net_dict["K_scaling"] != 1: + num_th_synapses *= self.net_dict["K_scaling"] + self.weight_th /= np.sqrt(self.net_dict["K_scaling"]) self.num_th_synapses = np.round(num_th_synapses).astype(int) if self.Rank == 0: - message = '' - if self.net_dict['N_scaling'] != 1: - message += \ - 'Neuron numbers are scaled by a factor of {:.3f}.\n'.format( - self.net_dict['N_scaling']) - if self.net_dict['K_scaling'] != 1: - message += \ - 'Indegrees are scaled by a factor of {:.3f}.'.format( - self.net_dict['K_scaling']) - message += '\n Weights and DC input are adjusted to compensate.\n' + message = "" + if self.net_dict["N_scaling"] != 1: + message += "Neuron numbers are scaled by a factor of {:.3f}.\n".format(self.net_dict["N_scaling"]) + if self.net_dict["K_scaling"] != 1: + message += "Indegrees are scaled by a factor of {:.3f}.".format(self.net_dict["K_scaling"]) + message += "\n Weights and DC input are adjusted to compensate.\n" print(message) def __setup_ngpu(self): - """ Initializes NEST GPU. - - """ + """Initializes NEST GPU.""" # set seeds for random number generation - master_seed = self.sim_dict['master_seed'] + master_seed = self.sim_dict["master_seed"] ngpu.SetRandomSeed(master_seed) - ngpu.SetKernelStatus({'print_time': self.sim_dict['print_time']}) - self.sim_resolution = self.sim_dict['sim_resolution'] + ngpu.SetKernelStatus({"print_time": self.sim_dict["print_time"]}) + self.sim_resolution = self.sim_dict["sim_resolution"] def __create_neuronal_populations(self): - """ Creates the neuronal populations. + """Creates the neuronal populations. The neuronal populations are created and the parameters are assigned to them. The initial membrane potential of the neurons is drawn from @@ -302,90 +307,95 @@ def __create_neuronal_populations(self): The first and last neuron id of each population is written to file. 
""" if self.Rank == 0: - print('Creating neuronal populations.') + print("Creating neuronal populations.") self.n_tot_neurons = 0 for i in np.arange(self.num_pops): self.n_tot_neurons = self.n_tot_neurons + self.num_neurons[i] - self.neurons = ngpu.Create(self.net_dict['neuron_model'], - self.n_tot_neurons) - - tau_syn=self.net_dict['neuron_params']['tau_syn'] - E_L=self.net_dict['neuron_params']['E_L'] - V_th=self.net_dict['neuron_params']['V_th'] - V_reset=self.net_dict['neuron_params']['V_reset'] - t_ref=self.net_dict['neuron_params']['t_ref'] - ngpu.SetStatus(self.neurons, {"tau_syn":tau_syn, - "E_L":E_L, - "Theta_rel":V_th - E_L, - "V_reset_rel":V_reset - E_L, - "t_ref":t_ref}) - + self.neurons = ngpu.Create(self.net_dict["neuron_model"], self.n_tot_neurons) + + tau_syn = self.net_dict["neuron_params"]["tau_syn"] + E_L = self.net_dict["neuron_params"]["E_L"] + V_th = self.net_dict["neuron_params"]["V_th"] + V_reset = self.net_dict["neuron_params"]["V_reset"] + t_ref = self.net_dict["neuron_params"]["t_ref"] + ngpu.SetStatus( + self.neurons, + { + "tau_syn": tau_syn, + "E_L": E_L, + "Theta_rel": V_th - E_L, + "V_reset_rel": V_reset - E_L, + "t_ref": t_ref, + }, + ) + self.pops = [] for i in np.arange(self.num_pops): - if i==0: + if i == 0: i_node_0 = 0 i_node_1 = i_node_0 + self.num_neurons[i] - #print("i_node_1 ", i_node_1) + # print("i_node_1 ", i_node_1) population = self.neurons[i_node_0:i_node_1] i_node_0 = i_node_1 - - I_e=self.DC_amp[i] - ngpu.SetStatus(population, {"I_e":I_e}) - - #print(population.i0) - #print(population.n) - - if self.net_dict['V0_type'] == 'optimized': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['optimized'][i] - E_L - V_std = self.net_dict['neuron_params']['V0_std'] \ - ['optimized'][i] - elif self.net_dict['V0_type'] == 'original': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['original'] - E_L, - V_std = self.net_dict['neuron_params']['V0_std']['original'] - else: - raise Exception( - 'V0_type incorrect. ' + - 'Valid options are "optimized" and "original".') - #print("V_rel_mean", V_rel_mean) - #print("V_std", V_std) - #print("pop size: ", len(population)) - ngpu.SetStatus(population, {"V_m_rel": {"distribution":"normal", - "mu":V_rel_mean, - "sigma":V_std } } ) + I_e = self.DC_amp[i] + ngpu.SetStatus(population, {"I_e": I_e}) + + # print(population.i0) + # print(population.n) + + if self.net_dict["V0_type"] == "optimized": + V_rel_mean = self.net_dict["neuron_params"]["V0_mean"]["optimized"][i] - E_L + V_std = self.net_dict["neuron_params"]["V0_std"]["optimized"][i] + elif self.net_dict["V0_type"] == "original": + V_rel_mean = (self.net_dict["neuron_params"]["V0_mean"]["original"] - E_L,) + V_std = self.net_dict["neuron_params"]["V0_std"]["original"] + else: + raise Exception("V0_type incorrect. 
" + 'Valid options are "optimized" and "original".') + + # print("V_rel_mean", V_rel_mean) + # print("V_std", V_std) + # print("pop size: ", len(population)) + ngpu.SetStatus( + population, + { + "V_m_rel": { + "distribution": "normal", + "mu": V_rel_mean, + "sigma": V_std, + } + }, + ) self.pops.append(population) # write node ids to file if self.Rank == 0: - fn = os.path.join(self.data_path, 'population_nodeids.dat') - with open(fn, 'w+') as f: + fn = os.path.join(self.data_path, "population_nodeids.dat") + with open(fn, "w+") as f: for pop in self.pops: - f.write('{} {}\n'.format(pop[0], - pop[len(pop)-1])) + f.write("{} {}\n".format(pop[0], pop[len(pop) - 1])) def __create_recording_devices(self): - """ Creates one recording device of each kind per population. + """Creates one recording device of each kind per population. Only devices which are given in ``sim_dict['rec_dev']`` are created. """ if self.Rank == 0: - print('Creating recording devices.') + print("Creating recording devices.") - if 'spike_detector' in self.sim_dict['rec_dev']: + if "spike_detector" in self.sim_dict["rec_dev"]: if self.Rank == 0: - print(' Activating spike time recording.') - #for pop in self.pops: + print(" Activating spike time recording.") + # for pop in self.pops: ngpu.ActivateRecSpikeTimes(self.neurons, 1000) - - #self.spike_detectors = ngpu.Create('spike_detector', + + # self.spike_detectors = ngpu.Create('spike_detector', # self.num_pops) - #if 'voltmeter' in self.sim_dict['rec_dev']: + # if 'voltmeter' in self.sim_dict['rec_dev']: # if self.Rank == 0: # print(' Creating voltmeters.') # self.voltmeters = ngpu.CreateRecord('V_m_rel', @@ -393,7 +403,7 @@ def __create_recording_devices(self): # params=vm_dict) def __create_poisson_bg_input(self): - """ Creates the Poisson generators for ongoing background input if + """Creates the Poisson generators for ongoing background input if specified in ``network_params.py``. If ``poisson_input`` is ``False``, DC input is applied for compensation @@ -401,17 +411,15 @@ def __create_poisson_bg_input(self): """ if self.Rank == 0: - print('Creating Poisson generators for background input.') + print("Creating Poisson generators for background input.") - self.poisson_bg_input = ngpu.Create('poisson_generator', - self.num_pops) - rate_list = self.net_dict['bg_rate'] * self.ext_indegrees + self.poisson_bg_input = ngpu.Create("poisson_generator", self.num_pops) + rate_list = self.net_dict["bg_rate"] * self.ext_indegrees for i_pop in range(self.num_pops): - ngpu.SetStatus([self.poisson_bg_input[i_pop]], - "rate", rate_list[i_pop]) + ngpu.SetStatus([self.poisson_bg_input[i_pop]], "rate", rate_list[i_pop]) def __create_thalamic_stim_input(self): - """ Creates the thalamic neuronal population if specified in + """Creates the thalamic neuronal population if specified in ``stim_dict``. 
Thalamic neurons are of type ``parrot_neuron`` and receive input from a @@ -421,63 +429,68 @@ def __create_thalamic_stim_input(self): """ if self.Rank == 0: - print('Creating thalamic input for external stimulation.') + print("Creating thalamic input for external stimulation.") - self.thalamic_population = ngpu.Create( - 'parrot_neuron', n=self.stim_dict['num_th_neurons']) + self.thalamic_population = ngpu.Create("parrot_neuron", n=self.stim_dict["num_th_neurons"]) - self.poisson_th = ngpu.Create('poisson_generator') + self.poisson_th = ngpu.Create("poisson_generator") self.poisson_th.set( - rate=self.stim_dict['th_rate'], - start=self.stim_dict['th_start'], - stop=(self.stim_dict['th_start'] + self.stim_dict['th_duration'])) + rate=self.stim_dict["th_rate"], + start=self.stim_dict["th_start"], + stop=(self.stim_dict["th_start"] + self.stim_dict["th_duration"]), + ) def __connect_neuronal_populations(self): - """ Creates the recurrent connections between neuronal populations. """ + """Creates the recurrent connections between neuronal populations.""" if self.Rank == 0: - print('Connecting neuronal populations recurrently.') + print("Connecting neuronal populations recurrently.") for i, target_pop in enumerate(self.pops): for j, source_pop in enumerate(self.pops): - if self.num_synapses[i][j] >= 0.: + if self.num_synapses[i][j] >= 0.0: conn_dict_rec = { - 'rule': 'fixed_total_number', - 'total_num': self.num_synapses[i][j]} + "rule": "fixed_total_number", + "total_num": self.num_synapses[i][j], + } w_mean = self.weight_matrix_mean[i][j] - w_std = abs(self.weight_matrix_mean[i][j] * - self.net_dict['weight_rel_std']) - + w_std = abs(self.weight_matrix_mean[i][j] * self.net_dict["weight_rel_std"]) + if w_mean < 0: - w_min = w_mean-3.0*w_std + w_min = w_mean - 3.0 * w_std w_max = 0.0 # i_receptor = 1 else: w_min = 0.0 - w_max = w_mean+3.0*w_std + w_max = w_mean + 3.0 * w_std # i_receptor = 0 - - d_mean = self.net_dict['delay_matrix_mean'][i][j] - d_std = (self.net_dict['delay_matrix_mean'][i][j] * - self.net_dict['delay_rel_std']) + + d_mean = self.net_dict["delay_matrix_mean"][i][j] + d_std = self.net_dict["delay_matrix_mean"][i][j] * self.net_dict["delay_rel_std"] d_min = self.sim_resolution - d_max = d_mean+3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict = { - 'weight': {'distribution':'normal_clipped', - 'mu':w_mean, 'low':w_min, - 'high':w_max, - 'sigma':w_std}, - 'delay': {'distribution':'normal_clipped', - 'mu':d_mean, 'low':d_min, - 'high':d_max, - 'sigma':d_std}} - #'receptor':i_receptor} - - ngpu.Connect( - source_pop, target_pop, conn_dict_rec, syn_dict) - - #def __connect_recording_devices(self): + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + } + #'receptor':i_receptor} + + ngpu.Connect(source_pop, target_pop, conn_dict_rec, syn_dict) + + # def __connect_recording_devices(self): # """ Connects the recording devices to the microcircuit.""" # if self.Rank == 0: # print('Connecting recording devices.') @@ -490,57 +503,67 @@ def __connect_neuronal_populations(self): # conn_dict, syn_dict) def __connect_poisson_bg_input(self): - """ Connects the Poisson generators to the microcircuit.""" + """Connects the Poisson generators to the microcircuit.""" if self.Rank == 0: - print('Connecting Poisson generators for background input.') + print("Connecting Poisson generators for background input.") 
for i, target_pop in enumerate(self.pops): - conn_dict_poisson = {'rule': 'all_to_all'} + conn_dict_poisson = {"rule": "all_to_all"} syn_dict_poisson = { - 'weight': self.weight_ext, - 'delay': self.net_dict['delay_poisson']} + "weight": self.weight_ext, + "delay": self.net_dict["delay_poisson"], + } ngpu.Connect( - [self.poisson_bg_input[i]], target_pop, - conn_dict_poisson, syn_dict_poisson) + [self.poisson_bg_input[i]], + target_pop, + conn_dict_poisson, + syn_dict_poisson, + ) def __connect_thalamic_stim_input(self): - """ Connects the thalamic input to the neuronal populations.""" + """Connects the thalamic input to the neuronal populations.""" if self.Rank == 0: - print('Connecting thalamic input.') + print("Connecting thalamic input.") # connect Poisson input to thalamic population ngpu.Connect(self.poisson_th, self.thalamic_population) # connect thalamic population to neuronal populations for i, target_pop in enumerate(self.pops): - conn_dict_th = { - 'rule': 'fixed_total_number', - 'N': self.num_th_synapses[i]} - - w_mean = self.weight_th, - w_std = self.weight_th * self.net_dict['weight_rel_std'] - w_min = 0.0, - w_max = w_mean + 3.0*w_std - - d_mean = self.stim_dict['delay_th_mean'] - d_std = (self.stim_dict['delay_th_mean'] * - self.stim_dict['delay_th_rel_std']) + conn_dict_th = {"rule": "fixed_total_number", "N": self.num_th_synapses[i]} + + w_mean = (self.weight_th,) + w_std = self.weight_th * self.net_dict["weight_rel_std"] + w_min = (0.0,) + w_max = w_mean + 3.0 * w_std + + d_mean = self.stim_dict["delay_th_mean"] + d_std = self.stim_dict["delay_th_mean"] * self.stim_dict["delay_th_rel_std"] d_min = self.sim_resolution - d_max = d_mean + 3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict_th = { - 'weight': {"distribution":"normal_clipped", - "mu":w_mean, "low":w_min, - "high":w_max, - "sigma":w_std}, - 'delay': {"distribution":"normal_clipped", - "mu":d_mean, "low":d_min, - "high":d_max, - "sigma":d_std}} - - ngpu.Connect( - self.thalamic_population, target_pop, - conn_spec=conn_dict_th, syn_spec=syn_dict_th) + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + } + ngpu.Connect( + self.thalamic_population, + target_pop, + conn_spec=conn_dict_th, + syn_spec=syn_dict_th, + ) diff --git a/python/Potjans_2014/network_params.py b/python/Potjans_2014/network_params.py index 84dc86f5d..652694aa1 100644 --- a/python/Potjans_2014/network_params.py +++ b/python/Potjans_2014/network_params.py @@ -33,7 +33,7 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): - """ Creates a matrix for excitatory and inhibitory values. + """Creates a matrix for excitatory and inhibitory values. 
Parameters ---------- @@ -58,109 +58,122 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): net_dict = { # factor to scale the number of neurons - 'N_scaling': 1.0, # 0.1, + "N_scaling": 1.0, # 0.1, # factor to scale the indegrees - 'K_scaling': 1.0, # 0.1, + "K_scaling": 1.0, # 0.1, # neuron model - 'neuron_model': 'iaf_psc_exp_g', + "neuron_model": "iaf_psc_exp_g", # names of the simulated neuronal populations - 'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'], + "populations": ["L23E", "L23I", "L4E", "L4I", "L5E", "L5I", "L6E", "L6I"], # number of neurons in the different populations (same order as # 'populations') - 'full_num_neurons': - np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), + "full_num_neurons": np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), # mean rates of the different populations in the non-scaled version of the # microcircuit (in spikes/s; same order as in 'populations'); # necessary for the scaling of the network. # The values were optained by running this PyNEST microcircuit with 12 MPI # processes and both 'N_scaling' and 'K_scaling' set to 1. - 'full_mean_rates': - np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), + "full_mean_rates": np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), # connection probabilities (the first index corresponds to the targets # and the second to the sources) - 'conn_probs': - np.array( - [[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.], - [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0., 0.0042, 0.], - [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.], - [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0., 0.1057, 0.], - [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.], - [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.], - [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], - [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]), + "conn_probs": np.array( + [ + [0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0], + [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0], + [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0], + [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0.0, 0.1057, 0.0], + [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.0], + [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.0], + [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], + [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443], + ] + ), # mean amplitude of excitatory postsynaptic potential (in mV) - 'PSP_exc_mean': 0.15, + "PSP_exc_mean": 0.15, # relative standard deviation of the weight - 'weight_rel_std': 0.1, + "weight_rel_std": 0.1, # relative inhibitory weight - 'g': -4, + "g": -4, # mean delay of excitatory connections (in ms) - 'delay_exc_mean': 1.5, + "delay_exc_mean": 1.5, # mean delay of inhibitory connections (in ms) - 'delay_inh_mean': 0.75, + "delay_inh_mean": 0.75, # relative standard deviation of the delay of excitatory and # inhibitory connections - 'delay_rel_std': 0.5, - + "delay_rel_std": 0.5, # turn Poisson input on or off (True or False) # if False: DC input is applied for compensation - 'poisson_input': True, + "poisson_input": True, # indegree of external connections to the different populations (same order # as in 'populations') - 'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), + "K_ext": np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), # rate of the Poisson generator (in spikes/s) - 'bg_rate': 8., + 
"bg_rate": 8.0, # delay from the Poisson generator to the network (in ms) - 'delay_poisson': 1.5, - + "delay_poisson": 1.5, # initial conditions for the membrane potential, options are: # 'original': uniform mean and standard deviation for all populations as # used in earlier implementations of the model # 'optimized': population-specific mean and standard deviation, allowing a # reduction of the initial activity burst in the network # (default) - 'V0_type': 'optimized', + "V0_type": "optimized", # parameters of the neuron model - 'neuron_params': { + "neuron_params": { # membrane potential average for the neurons (in mV) - 'V0_mean': {'original': -58.0, - 'optimized': [-68.28, -63.16, -63.33, -63.45, - -63.11, -61.66, -66.72, -61.43]}, + "V0_mean": { + "original": -58.0, + "optimized": [ + -68.28, + -63.16, + -63.33, + -63.45, + -63.11, + -61.66, + -66.72, + -61.43, + ], + }, # standard deviation of the average membrane potential (in mV) - 'V0_std': {'original': 10.0, - 'optimized': [5.36, 4.57, 4.74, 4.94, - 4.94, 4.55, 5.46, 4.48]}, + "V0_std": { + "original": 10.0, + "optimized": [5.36, 4.57, 4.74, 4.94, 4.94, 4.55, 5.46, 4.48], + }, # reset membrane potential of the neurons (in mV) - 'E_L': -65.0, + "E_L": -65.0, # threshold potential of the neurons (in mV) - 'V_th': -50.0, + "V_th": -50.0, # membrane potential after a spike (in mV) - 'V_reset': -65.0, + "V_reset": -65.0, # membrane capacitance (in pF) - 'C_m': 250.0, + "C_m": 250.0, # membrane time constant (in ms) - 'tau_m': 10.0, + "tau_m": 10.0, # time constant of postsynaptic currents (in ms) - 'tau_syn': 0.5, + "tau_syn": 0.5, # refractory period of the neurons after a spike (in ms) - 't_ref': 2.0}} + "t_ref": 2.0, + }, +} # derive matrix of mean PSPs, # the mean PSP of the connection from L4E to L23E is doubled PSP_matrix_mean = get_exc_inh_matrix( - net_dict['PSP_exc_mean'], - net_dict['PSP_exc_mean'] * net_dict['g'], - len(net_dict['populations'])) -PSP_matrix_mean[0, 2] = 2. * net_dict['PSP_exc_mean'] + net_dict["PSP_exc_mean"], + net_dict["PSP_exc_mean"] * net_dict["g"], + len(net_dict["populations"]), +) +PSP_matrix_mean[0, 2] = 2.0 * net_dict["PSP_exc_mean"] updated_dict = { # matrix of mean PSPs - 'PSP_matrix_mean': PSP_matrix_mean, - + "PSP_matrix_mean": PSP_matrix_mean, # matrix of mean delays - 'delay_matrix_mean': get_exc_inh_matrix( - net_dict['delay_exc_mean'], - net_dict['delay_inh_mean'], - len(net_dict['populations']))} + "delay_matrix_mean": get_exc_inh_matrix( + net_dict["delay_exc_mean"], + net_dict["delay_inh_mean"], + len(net_dict["populations"]), + ), +} net_dict.update(updated_dict) diff --git a/python/Potjans_2014/run_microcircuit.py b/python/Potjans_2014/run_microcircuit.py index 9d211f962..d0ab39273 100644 --- a/python/Potjans_2014/run_microcircuit.py +++ b/python/Potjans_2014/run_microcircuit.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. 
-from stimulus_params import stim_dict -from network_params import net_dict -from sim_params import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() ############################################################################### @@ -74,10 +77,8 @@ # The computation of spike rates discards the presimulation time to exclude # initialization artifacts. -raster_plot_interval = np.array([stim_dict['th_start'] - 100.0, - stim_dict['th_start'] + 100.0]) -firing_rates_interval = np.array([sim_dict['t_presim'], - sim_dict['t_presim'] + sim_dict['t_sim']]) +raster_plot_interval = np.array([stim_dict["th_start"] - 100.0, stim_dict["th_start"] + 100.0]) +firing_rates_interval = np.array([sim_dict["t_presim"], sim_dict["t_presim"] + sim_dict["t_sim"]]) net.evaluate(raster_plot_interval, firing_rates_interval) time_evaluate = time.time() @@ -86,25 +87,12 @@ # data evaluation and print calls. print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_evaluate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to presimulate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) + - ' Time to evaluate: {:.3f} s\n'.format( - time_evaluate - - time_simulate)) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_evaluate - time_start) # of Rank {}:\n'.format( .Rank()) + + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to presimulate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) + + " Time to evaluate: {:.3f} s\n".format(time_evaluate - time_simulate) +) diff --git a/python/Potjans_2014/sim_params.py b/python/Potjans_2014/sim_params.py index f6c328367..4e2e7484b 100644 --- a/python/Potjans_2014/sim_params.py +++ b/python/Potjans_2014/sim_params.py @@ -34,26 +34,27 @@ # The full simulation time is the sum of a presimulation time and the main # simulation time. # presimulation time (in ms) - 't_presim': 500.0, + "t_presim": 500.0, # simulation time (in ms) - 't_sim': 10000.0, + "t_sim": 10000.0, # resolution of the simulation (in ms) - 'sim_resolution': 0.1, + "sim_resolution": 0.1, # list of recording devices, default is 'spike_detector'. A 'voltmeter' can # be added to record membrane voltages of the neurons. Nothing will be # recorded if an empty list is given. 
- 'rec_dev': ['spike_detector'], + "rec_dev": ["spike_detector"], # path to save the output data - 'data_path': os.path.join(os.getcwd(), 'data/'), + "data_path": os.path.join(os.getcwd(), "data/"), # masterseed for NEST GPU and NumPy - 'master_seed': 12349, #55, + "master_seed": 12349, # 55, # number of threads per MPI process - 'local_num_threads': 1, + "local_num_threads": 1, # recording interval of the membrane potential (in ms) - 'rec_V_int': 1.0, + "rec_V_int": 1.0, # if True, data will be overwritten, # if False, a NESTError is raised if the files already exist - 'overwrite_files': True, + "overwrite_files": True, # print the time progress. This should only be used when the simulation # is run on a local machine. - 'print_time': False} + "print_time": False, +} diff --git a/python/Potjans_2014/sim_params_norec.py b/python/Potjans_2014/sim_params_norec.py index 8f7433b65..97f03ef4a 100644 --- a/python/Potjans_2014/sim_params_norec.py +++ b/python/Potjans_2014/sim_params_norec.py @@ -34,26 +34,27 @@ # The full simulation time is the sum of a presimulation time and the main # simulation time. # presimulation time (in ms) - 't_presim': 500.0, + "t_presim": 500.0, # simulation time (in ms) - 't_sim': 10000.0, + "t_sim": 10000.0, # resolution of the simulation (in ms) - 'sim_resolution': 0.1, + "sim_resolution": 0.1, # list of recording devices, default is 'spike_detector'. A 'voltmeter' can # be added to record membrane voltages of the neurons. Nothing will be # recorded if an empty list is given. - 'rec_dev': [], + "rec_dev": [], # path to save the output data - 'data_path': os.path.join(os.getcwd(), 'data/'), + "data_path": os.path.join(os.getcwd(), "data/"), # masterseed for NESTGPU and NumPy - 'master_seed': 12349, #55, + "master_seed": 12349, # 55, # number of threads per MPI process - 'local_num_threads': 1, + "local_num_threads": 1, # recording interval of the membrane potential (in ms) - 'rec_V_int': 1.0, + "rec_V_int": 1.0, # if True, data will be overwritten, # if False, a NESTError is raised if the files already exist - 'overwrite_files': True, + "overwrite_files": True, # print the time progress. This should only be used when the simulation # is run on a local machine. 
- 'print_time': False} + "print_time": False, +} diff --git a/python/Potjans_2014/stimulus_params.py b/python/Potjans_2014/stimulus_params.py index 42d397dfd..c79f53768 100644 --- a/python/Potjans_2014/stimulus_params.py +++ b/python/Potjans_2014/stimulus_params.py @@ -34,34 +34,33 @@ stim_dict = { # optional thalamic input # turn thalamic input on or off (True or False) - 'thalamic_input': False, + "thalamic_input": False, # start of the thalamic input (in ms) - 'th_start': 700.0, + "th_start": 700.0, # duration of the thalamic input (in ms) - 'th_duration': 10.0, + "th_duration": 10.0, # rate of the thalamic input (in spikes/s) - 'th_rate': 120.0, + "th_rate": 120.0, # number of thalamic neurons - 'num_th_neurons': 902, + "num_th_neurons": 902, # connection probabilities of the thalamus to the different populations # (same order as in 'populations' in 'net_dict') - 'conn_probs_th': - np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), + "conn_probs_th": np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), # mean amplitude of the thalamic postsynaptic potential (in mV), # standard deviation will be taken from 'net_dict' - 'PSP_th': 0.15, + "PSP_th": 0.15, # mean delay of the thalamic input (in ms) - 'delay_th_mean': 1.5, + "delay_th_mean": 1.5, # relative standard deviation of the thalamic delay (in ms) - 'delay_th_rel_std': 0.5, - + "delay_th_rel_std": 0.5, # optional DC input # turn DC input on or off (True or False) - 'dc_input': False, + "dc_input": False, # start of the DC input (in ms) - 'dc_start': 650.0, + "dc_start": 650.0, # duration of the DC input (in ms) - 'dc_dur': 100.0, + "dc_dur": 100.0, # amplitude of the DC input (in pA); final amplitude is population-specific # and will be obtained by multiplication with 'K_ext' - 'dc_amp': 0.3} + "dc_amp": 0.3, +} diff --git a/python/Potjans_2014_hc/README.rst b/python/Potjans_2014_hc/README.rst index 6fff2a8dc..58e7933d5 100644 --- a/python/Potjans_2014_hc/README.rst +++ b/python/Potjans_2014_hc/README.rst @@ -98,7 +98,7 @@ References .. [1] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking network model. Cerebral Cortex. 24(3):785–806. DOI: `10.1093/cercor/bhs358 <https://doi.org/10.1093/cercor/bhs358>`__. - + .. [2] van Albada SJ., Rowley AG., Senk J., Hopkins M., Schmidt M., Stokes AB., Lester DR., Diesmann M. and Furber SB. 2018. Performance Comparison of the Digital Neuromorphic Hardware SpiNNaker and the Neural Network Simulation Software NEST for a Full-Scale Cortical Microcircuit Model. diff --git a/python/Potjans_2014_hc/eval_microcircuit_time.py b/python/Potjans_2014_hc/eval_microcircuit_time.py index 3c2fa89b9..9dbb3956f 100644 --- a/python/Potjans_2014_hc/eval_microcircuit_time.py +++ b/python/Potjans_2014_hc/eval_microcircuit_time.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. 
-from stimulus_params import stim_dict -from network_params import net_dict -from sim_params_norec import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params_norec import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() @@ -72,22 +75,11 @@ # data evaluation and print calls. print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_simulate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to calibrate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) ) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_simulate - time_start) # of Rank {}:\n'.format( .Rank()) + + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to calibrate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) +) diff --git a/python/Potjans_2014_hc/helpers.py b/python/Potjans_2014_hc/helpers.py index 00f1a3f95..ac7f89b01 100644 --- a/python/Potjans_2014_hc/helpers.py +++ b/python/Potjans_2014_hc/helpers.py @@ -29,14 +29,17 @@ """ -from matplotlib.patches import Polygon -import matplotlib.pyplot as plt import os import sys + +import matplotlib.pyplot as plt import numpy as np -if 'DISPLAY' not in os.environ: +from matplotlib.patches import Polygon + +if "DISPLAY" not in os.environ: import matplotlib - matplotlib.use('Agg') + + matplotlib.use("Agg") def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): @@ -60,12 +63,12 @@ def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): Matrix of synapse numbers. """ prod = np.outer(popsize1, popsize2) - num_synapses = np.log(1. - conn_probs) / np.log((prod - 1.) / prod) + num_synapses = np.log(1.0 - conn_probs) / np.log((prod - 1.0) / prod) return num_synapses def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): - """ Computes a factor to convert postsynaptic potentials to currents. + """Computes a factor to convert postsynaptic potentials to currents. The time course of the postsynaptic potential ``v`` is computed as :math: `v(t)=(i*h)(t)` @@ -108,16 +111,16 @@ def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): (in pA). """ - sub = 1. / (tau_syn - tau_m) + sub = 1.0 / (tau_syn - tau_m) pre = tau_m * tau_syn / C_m * sub frac = (tau_m / tau_syn) ** sub - PSC_over_PSP = 1. / (pre * (frac**tau_m - frac**tau_syn)) + PSC_over_PSP = 1.0 / (pre * (frac**tau_m - frac**tau_syn)) return PSC_over_PSP def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): - """ Computes DC input if no Poisson input is provided to the microcircuit. 
+ """Computes DC input if no Poisson input is provided to the microcircuit. Parameters ---------- @@ -140,18 +143,19 @@ def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): def adjust_weights_and_input_to_synapse_scaling( - full_num_neurons, - full_num_synapses, - K_scaling, - mean_PSC_matrix, - PSC_ext, - tau_syn, - full_mean_rates, - DC_amp, - poisson_input, - bg_rate, - K_ext): - """ Adjusts weights and external input to scaling of indegrees. + full_num_neurons, + full_num_synapses, + K_scaling, + mean_PSC_matrix, + PSC_ext, + tau_syn, + full_mean_rates, + DC_amp, + poisson_input, + bg_rate, + K_ext, +): + """Adjusts weights and external input to scaling of indegrees. The recurrent and external weights are adjusted to the scaling of the indegrees. Extra DC input is added to compensate for the @@ -196,22 +200,19 @@ def adjust_weights_and_input_to_synapse_scaling( PSC_ext_new = PSC_ext / np.sqrt(K_scaling) # recurrent input of full network - indegree_matrix = \ - full_num_synapses / full_num_neurons[:, np.newaxis] - input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, - axis=1) + indegree_matrix = full_num_synapses / full_num_neurons[:, np.newaxis] + input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, axis=1) - DC_amp_new = DC_amp \ - + 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_rec + DC_amp_new = DC_amp + 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_rec if poisson_input: input_ext = PSC_ext * K_ext * bg_rate - DC_amp_new += 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_ext + DC_amp_new += 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_ext return PSC_matrix_new, PSC_ext_new, DC_amp_new def plot_raster(path, name, begin, end, N_scaling): - """ Creates a spike raster plot of the network activity. + """Creates a spike raster plot of the network activity. Parameters ----------- @@ -232,34 +233,33 @@ def plot_raster(path, name, begin, end, N_scaling): """ fs = 18 # fontsize - ylabels = ['L2/3', 'L4', 'L5', 'L6'] - color_list = np.tile(['#595289', '#af143c'], 4) + ylabels = ["L2/3", "L4", "L5", "L6"] + color_list = np.tile(["#595289", "#af143c"], 4) sd_names, node_ids, data = __load_spike_times(path, name, begin, end) last_node_id = node_ids[-1, -1] mod_node_ids = np.abs(node_ids - last_node_id) + 1 - label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / - 2. for i in np.arange(0, 8, 2)] + label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / 2.0 for i in np.arange(0, 8, 2)] stp = 1 if N_scaling > 0.1: - stp = int(10. * N_scaling) - print(' Only spikes of neurons in steps of {} are shown.'.format(stp)) + stp = int(10.0 * N_scaling) + print(" Only spikes of neurons in steps of {} are shown.".format(stp)) plt.figure(figsize=(8, 6)) for i, n in enumerate(sd_names): - times = data[i]['time_ms'] - neurons = np.abs(data[i]['sender'] - last_node_id) + 1 - plt.plot(times[::stp], neurons[::stp], '.', color=color_list[i]) - plt.xlabel('time [ms]', fontsize=fs) + times = data[i]["time_ms"] + neurons = np.abs(data[i]["sender"] - last_node_id) + 1 + plt.plot(times[::stp], neurons[::stp], ".", color=color_list[i]) + plt.xlabel("time [ms]", fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(label_pos, ylabels, fontsize=fs) - plt.savefig(os.path.join(path, 'raster_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "raster_plot.png"), dpi=300) def firing_rates(path, name, begin, end): - """ Computes mean and standard deviation of firing rates per population. 
+ """Computes mean and standard deviation of firing rates per population. The firing rate of each neuron in each population is computed and stored in a .dat file in the directory of the spike detectors. The mean firing @@ -285,23 +285,21 @@ def firing_rates(path, name, begin, end): all_mean_rates = [] all_std_rates = [] for i, n in enumerate(sd_names): - senders = data[i]['sender'] + senders = data[i]["sender"] # 1 more bin than node ids per population bins = np.arange(node_ids[i, 0], node_ids[i, 1] + 2) spike_count_per_neuron, _ = np.histogram(senders, bins=bins) - rate_per_neuron = spike_count_per_neuron * 1000. / (end - begin) - np.savetxt(os.path.join(path, ('rate' + str(i) + '.dat')), - rate_per_neuron) + rate_per_neuron = spike_count_per_neuron * 1000.0 / (end - begin) + np.savetxt(os.path.join(path, ("rate" + str(i) + ".dat")), rate_per_neuron) # zeros are included all_mean_rates.append(np.mean(rate_per_neuron)) all_std_rates.append(np.std(rate_per_neuron)) - print('Mean rates: {} spikes/s'.format(np.around(all_mean_rates, decimals=3))) - print('Standard deviation of rates: {} spikes/s'.format( - np.around(all_std_rates, decimals=3))) + print("Mean rates: {} spikes/s".format(np.around(all_mean_rates, decimals=3))) + print("Standard deviation of rates: {} spikes/s".format(np.around(all_std_rates, decimals=3))) def boxplot(path, populations): - """ Creates a boxblot of the firing rates of all populations. + """Creates a boxblot of the firing rates of all populations. To create the boxplot, the firing rates of each neuron in each population need to be computed with the function ``firing_rate()``. @@ -319,29 +317,36 @@ def boxplot(path, populations): """ fs = 18 - pop_names = [string.replace('23', '2/3') for string in populations] + pop_names = [string.replace("23", "2/3") for string in populations] label_pos = list(range(len(populations), 0, -1)) - color_list = ['#af143c', '#595289'] - medianprops = dict(linestyle='-', linewidth=2.5, color='black') - meanprops = dict(linestyle='--', linewidth=2.5, color='lightgray') + color_list = ["#af143c", "#595289"] + medianprops = dict(linestyle="-", linewidth=2.5, color="black") + meanprops = dict(linestyle="--", linewidth=2.5, color="lightgray") rates_per_neuron_rev = [] for i in np.arange(len(populations))[::-1]: - rates_per_neuron_rev.append( - np.loadtxt(os.path.join(path, ('rate' + str(i) + '.dat')))) + rates_per_neuron_rev.append(np.loadtxt(os.path.join(path, ("rate" + str(i) + ".dat")))) plt.figure(figsize=(8, 6)) - bp = plt.boxplot(rates_per_neuron_rev, 0, 'rs', 0, medianprops=medianprops, - meanprops=meanprops, meanline=True, showmeans=True) - plt.setp(bp['boxes'], color='black') - plt.setp(bp['whiskers'], color='black') - plt.setp(bp['fliers'], color='red', marker='+') + bp = plt.boxplot( + rates_per_neuron_rev, + 0, + "rs", + 0, + medianprops=medianprops, + meanprops=meanprops, + meanline=True, + showmeans=True, + ) + plt.setp(bp["boxes"], color="black") + plt.setp(bp["whiskers"], color="black") + plt.setp(bp["fliers"], color="red", marker="+") # boxcolors for i in np.arange(len(populations)): boxX = [] boxY = [] - box = bp['boxes'][i] + box = bp["boxes"][i] for j in list(range(5)): boxX.append(box.get_xdata()[j]) boxY.append(box.get_ydata()[j]) @@ -349,14 +354,14 @@ def boxplot(path, populations): k = i % 2 boxPolygon = Polygon(boxCoords, facecolor=color_list[k]) plt.gca().add_patch(boxPolygon) - plt.xlabel('firing rate [spikes/s]', fontsize=fs) + plt.xlabel("firing rate [spikes/s]", fontsize=fs) plt.yticks(label_pos, pop_names, 
fontsize=fs) plt.xticks(fontsize=fs) - plt.savefig(os.path.join(path, 'box_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "box_plot.png"), dpi=300) def __gather_metadata(path): - """ Reads first and last ids of + """Reads first and last ids of neurons in each population. Parameters @@ -371,16 +376,16 @@ def __gather_metadata(path): """ # load node IDs - node_idfile = open(path + 'population_nodeids.dat', 'r') + node_idfile = open(path + "population_nodeids.dat", "r") node_ids = [] for l in node_idfile: node_ids.append(l.split()) - node_ids = np.array(node_ids, dtype='i4') + node_ids = np.array(node_ids, dtype="i4") return node_ids def __load_spike_times(path, name, begin, end): - """ Loads spike times of each spike detector. + """Loads spike times of each spike detector. Parameters ---------- @@ -402,20 +407,19 @@ def __load_spike_times(path, name, begin, end): """ node_ids = __gather_metadata(path) data = {} - dtype = {'names': ('sender', 'time_ms'), # as in header - 'formats': ('i4', 'f8')} - #print(node_ids) + dtype = {"names": ("sender", "time_ms"), "formats": ("i4", "f8")} # as in header + # print(node_ids) sd_names = {} - + for i_pop in range(8): - fn = os.path.join(path, 'spike_times_' + str(i_pop) + '.dat') + fn = os.path.join(path, "spike_times_" + str(i_pop) + ".dat") data_i_raw = np.loadtxt(fn, skiprows=1, dtype=dtype) - data_i_raw = np.sort(data_i_raw, order='time_ms') + data_i_raw = np.sort(data_i_raw, order="time_ms") # begin and end are included if they exist - low = np.searchsorted(data_i_raw['time_ms'], v=begin, side='left') - high = np.searchsorted(data_i_raw['time_ms'], v=end, side='right') + low = np.searchsorted(data_i_raw["time_ms"], v=begin, side="left") + high = np.searchsorted(data_i_raw["time_ms"], v=end, side="right") data[i_pop] = data_i_raw[low:high] - sd_names[i_pop] = 'spike_times_' + str(i_pop) + sd_names[i_pop] = "spike_times_" + str(i_pop) return sd_names, node_ids, data diff --git a/python/Potjans_2014_hc/network.py b/python/Potjans_2014_hc/network.py index 230802092..cc54abd59 100644 --- a/python/Potjans_2014_hc/network.py +++ b/python/Potjans_2014_hc/network.py @@ -27,13 +27,14 @@ """ import os -import numpy as np -import nestgpu as ngpu + import helpers +import nestgpu as ngpu +import numpy as np class Network: - """ Provides functions to setup NEST GPU, to create and connect all nodes + """Provides functions to setup NEST GPU, to create and connect all nodes of the network, to simulate, and to evaluate the resulting spike data. Instantiating a Network object derives dependent parameters and already @@ -60,17 +61,16 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.Rank = 0 # data directory - self.data_path = sim_dict['data_path'] + self.data_path = sim_dict["data_path"] if self.Rank == 0: if os.path.isdir(self.data_path): - message = ' Directory already existed.' - if self.sim_dict['overwrite_files']: - message += ' Old data will be overwritten.' + message = " Directory already existed." + if self.sim_dict["overwrite_files"]: + message += " Old data will be overwritten." else: os.mkdir(self.data_path) - message = ' Directory has been created.' - print('Data will be written to: {}\n{}\n'.format(self.data_path, - message)) + message = " Directory has been created." 
+ print("Data will be written to: {}\n{}\n".format(self.data_path, message)) # derive parameters based on input dictionaries self.__derive_parameters() @@ -79,23 +79,23 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.__setup_ngpu() def create(self): - """ Creates all network nodes. + """Creates all network nodes. Neuronal populations and recording and stimulating devices are created. """ self.__create_neuronal_populations() - if len(self.sim_dict['rec_dev']) > 0: + if len(self.sim_dict["rec_dev"]) > 0: self.__create_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__create_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__create_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__create_dc_stim_input() def connect(self): - """ Connects the network. + """Connects the network. Recurrent connections among neurons of the neuronal populations are established, and recording and stimulating devices are connected. @@ -113,20 +113,20 @@ def connect(self): """ self.__connect_neuronal_populations() - #if len(self.sim_dict['rec_dev']) > 0: + # if len(self.sim_dict['rec_dev']) > 0: # self.__connect_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__connect_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__connect_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__connect_dc_stim_input() - #ngpu.Prepare() - #ngpu.Cleanup() + # ngpu.Prepare() + # ngpu.Cleanup() def simulate(self, t_sim): - """ Simulates the microcircuit. + """Simulates the microcircuit. Parameters ---------- @@ -135,12 +135,12 @@ def simulate(self, t_sim): """ if self.Rank == 0: - print('Simulating {} ms.'.format(t_sim)) + print("Simulating {} ms.".format(t_sim)) ngpu.Simulate(t_sim) def evaluate(self, raster_plot_interval, firing_rates_interval): - """ Displays simulation results. + """Displays simulation results. Creates a spike raster plot. 
Calculates the firing rate of each population and displays them as a @@ -166,89 +166,99 @@ def evaluate(self, raster_plot_interval, firing_rates_interval): for i_pop in range(len(self.pops)): population = self.pops[i_pop] data = [] - spike_times_list = spike_times_net[popid:popid+len(population)] + spike_times_list = spike_times_net[popid : popid + len(population)] popid += len(population) for i_neur in range(len(population)): spike_times = spike_times_list[i_neur] - if (len(spike_times) != 0): + if len(spike_times) != 0: # print("i_pop:", i_pop, " i_neur:", i_neur, " n_spikes:", # len(spike_times)) for t in spike_times: data.append([population[i_neur], t]) arr = np.array(data) - fn = os.path.join(self.data_path, 'spike_times_' + str(i_pop) + - '.dat') - fmt='%d\t%.3f' - np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", - comments='') + fn = os.path.join(self.data_path, "spike_times_" + str(i_pop) + ".dat") + fmt = "%d\t%.3f" + np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", comments="") if self.Rank == 0: - print('Interval to plot spikes: {} ms'.format(raster_plot_interval)) + print("Interval to plot spikes: {} ms".format(raster_plot_interval)) helpers.plot_raster( self.data_path, - 'spike_detector', + "spike_detector", raster_plot_interval[0], raster_plot_interval[1], - self.net_dict['N_scaling']) + self.net_dict["N_scaling"], + ) - print('Interval to compute firing rates: {} ms'.format( - firing_rates_interval)) + print("Interval to compute firing rates: {} ms".format(firing_rates_interval)) helpers.firing_rates( - self.data_path, 'spike_detector', - firing_rates_interval[0], firing_rates_interval[1]) - helpers.boxplot(self.data_path, self.net_dict['populations']) + self.data_path, + "spike_detector", + firing_rates_interval[0], + firing_rates_interval[1], + ) + helpers.boxplot(self.data_path, self.net_dict["populations"]) def __derive_parameters(self): """ Derives and adjusts parameters and stores them as class attributes. 
""" - self.num_pops = len(self.net_dict['populations']) + self.num_pops = len(self.net_dict["populations"]) # total number of synapses between neuronal populations before scaling full_num_synapses = helpers.num_synapses_from_conn_probs( - self.net_dict['conn_probs'], - self.net_dict['full_num_neurons'], - self.net_dict['full_num_neurons']) + self.net_dict["conn_probs"], + self.net_dict["full_num_neurons"], + self.net_dict["full_num_neurons"], + ) # scaled numbers of neurons and synapses - self.num_neurons = np.round((self.net_dict['full_num_neurons'] * - self.net_dict['N_scaling'])).astype(int) - self.num_synapses = np.round((full_num_synapses * - self.net_dict['N_scaling'] * - self.net_dict['K_scaling'])).astype(int) - self.ext_indegrees = np.round((self.net_dict['K_ext'] * - self.net_dict['K_scaling'])).astype(int) + self.num_neurons = np.round((self.net_dict["full_num_neurons"] * self.net_dict["N_scaling"])).astype(int) + self.num_synapses = np.round( + (full_num_synapses * self.net_dict["N_scaling"] * self.net_dict["K_scaling"]) + ).astype(int) + self.ext_indegrees = np.round((self.net_dict["K_ext"] * self.net_dict["K_scaling"])).astype(int) # conversion from PSPs to PSCs PSC_over_PSP = helpers.postsynaptic_potential_to_current( - self.net_dict['neuron_params']['C_m'], - self.net_dict['neuron_params']['tau_m'], - self.net_dict['neuron_params']['tau_syn']) - PSC_matrix_mean = self.net_dict['PSP_matrix_mean'] * PSC_over_PSP - PSC_ext = self.net_dict['PSP_exc_mean'] * PSC_over_PSP + self.net_dict["neuron_params"]["C_m"], + self.net_dict["neuron_params"]["tau_m"], + self.net_dict["neuron_params"]["tau_syn"], + ) + PSC_matrix_mean = self.net_dict["PSP_matrix_mean"] * PSC_over_PSP + PSC_ext = self.net_dict["PSP_exc_mean"] * PSC_over_PSP # DC input compensates for potentially missing Poisson input - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: DC_amp = np.zeros(self.num_pops) else: if self.Rank == 0: - print('DC input compensates for missing Poisson input.\n') + print("DC input compensates for missing Poisson input.\n") DC_amp = helpers.dc_input_compensating_poisson( - self.net_dict['bg_rate'], self.net_dict['K_ext'], - self.net_dict['neuron_params']['tau_syn'], - PSC_ext) + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + self.net_dict["neuron_params"]["tau_syn"], + PSC_ext, + ) # adjust weights and DC amplitude if the indegree is scaled - if self.net_dict['K_scaling'] != 1: - PSC_matrix_mean, PSC_ext, DC_amp = \ - helpers.adjust_weights_and_input_to_synapse_scaling( - self.net_dict['full_num_neurons'], - full_num_synapses, self.net_dict['K_scaling'], - PSC_matrix_mean, PSC_ext, - self.net_dict['neuron_params']['tau_syn'], - self.net_dict['full_mean_rates'], - DC_amp, - self.net_dict['poisson_input'], - self.net_dict['bg_rate'], self.net_dict['K_ext']) + if self.net_dict["K_scaling"] != 1: + ( + PSC_matrix_mean, + PSC_ext, + DC_amp, + ) = helpers.adjust_weights_and_input_to_synapse_scaling( + self.net_dict["full_num_neurons"], + full_num_synapses, + self.net_dict["K_scaling"], + PSC_matrix_mean, + PSC_ext, + self.net_dict["neuron_params"]["tau_syn"], + self.net_dict["full_mean_rates"], + DC_amp, + self.net_dict["poisson_input"], + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + ) # store final parameters as class attributes self.weight_matrix_mean = PSC_matrix_mean @@ -256,44 +266,39 @@ def __derive_parameters(self): self.DC_amp = DC_amp # thalamic input - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: num_th_synapses = 
helpers.num_synapses_from_conn_probs( - self.stim_dict['conn_probs_th'], - self.stim_dict['num_th_neurons'], - self.net_dict['full_num_neurons'])[0] - self.weight_th = self.stim_dict['PSP_th'] * PSC_over_PSP - if self.net_dict['K_scaling'] != 1: - num_th_synapses *= self.net_dict['K_scaling'] - self.weight_th /= np.sqrt(self.net_dict['K_scaling']) + self.stim_dict["conn_probs_th"], + self.stim_dict["num_th_neurons"], + self.net_dict["full_num_neurons"], + )[0] + self.weight_th = self.stim_dict["PSP_th"] * PSC_over_PSP + if self.net_dict["K_scaling"] != 1: + num_th_synapses *= self.net_dict["K_scaling"] + self.weight_th /= np.sqrt(self.net_dict["K_scaling"]) self.num_th_synapses = np.round(num_th_synapses).astype(int) if self.Rank == 0: - message = '' - if self.net_dict['N_scaling'] != 1: - message += \ - 'Neuron numbers are scaled by a factor of {:.3f}.\n'.format( - self.net_dict['N_scaling']) - if self.net_dict['K_scaling'] != 1: - message += \ - 'Indegrees are scaled by a factor of {:.3f}.'.format( - self.net_dict['K_scaling']) - message += '\n Weights and DC input are adjusted to compensate.\n' + message = "" + if self.net_dict["N_scaling"] != 1: + message += "Neuron numbers are scaled by a factor of {:.3f}.\n".format(self.net_dict["N_scaling"]) + if self.net_dict["K_scaling"] != 1: + message += "Indegrees are scaled by a factor of {:.3f}.".format(self.net_dict["K_scaling"]) + message += "\n Weights and DC input are adjusted to compensate.\n" print(message) def __setup_ngpu(self): - """ Initializes NEST GPU. - - """ + """Initializes NEST GPU.""" # set seeds for random number generation - master_seed = self.sim_dict['master_seed'] + master_seed = self.sim_dict["master_seed"] ngpu.SetRandomSeed(master_seed) - ngpu.SetKernelStatus({'print_time': self.sim_dict['print_time']}) - self.sim_resolution = self.sim_dict['sim_resolution'] + ngpu.SetKernelStatus({"print_time": self.sim_dict["print_time"]}) + self.sim_resolution = self.sim_dict["sim_resolution"] def __create_neuronal_populations(self): - """ Creates the neuronal populations. + """Creates the neuronal populations. The neuronal populations are created and the parameters are assigned to them. The initial membrane potential of the neurons is drawn from @@ -302,31 +307,30 @@ def __create_neuronal_populations(self): The first and last neuron id of each population is written to file. 
""" if self.Rank == 0: - print('Creating neuronal populations.') + print("Creating neuronal populations.") self.n_tot_neurons = 0 for i in np.arange(self.num_pops): self.n_tot_neurons = self.n_tot_neurons + self.num_neurons[i] - self.neurons = ngpu.Create(self.net_dict['neuron_model'], - self.n_tot_neurons) - + self.neurons = ngpu.Create(self.net_dict["neuron_model"], self.n_tot_neurons) + self.pops = [] for i in np.arange(self.num_pops): - if i==0: + if i == 0: i_node_0 = 0 i_node_1 = i_node_0 + self.num_neurons[i] - #print("i_node_1 ", i_node_1) + # print("i_node_1 ", i_node_1) population = self.neurons[i_node_0:i_node_1] i_node_0 = i_node_1 - - tau_syn_ex=self.net_dict['neuron_params']['tau_syn'] - tau_syn_in=self.net_dict['neuron_params']['tau_syn'] - E_L=self.net_dict['neuron_params']['E_L'] - V_th=self.net_dict['neuron_params']['V_th'] - V_reset=self.net_dict['neuron_params']['V_reset'] - t_ref=self.net_dict['neuron_params']['t_ref'] - I_e=self.DC_amp[i] + + tau_syn_ex = self.net_dict["neuron_params"]["tau_syn"] + tau_syn_in = self.net_dict["neuron_params"]["tau_syn"] + E_L = self.net_dict["neuron_params"]["E_L"] + V_th = self.net_dict["neuron_params"]["V_th"] + V_reset = self.net_dict["neuron_params"]["V_reset"] + t_ref = self.net_dict["neuron_params"]["t_ref"] + I_e = self.DC_amp[i] # ngpu.SetStatus(population, {"tau_ex":tau_syn_ex, # "tau_in":tau_syn_in, @@ -335,59 +339,60 @@ def __create_neuronal_populations(self): # "V_reset_rel":V_reset - E_L, # "t_ref":t_ref, # "I_e":I_e}) - #print(population.i0) - #print(population.n) - ngpu.SetStatus(population, {"I_e":I_e}) - - if self.net_dict['V0_type'] == 'optimized': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['optimized'][i] - E_L - V_std = self.net_dict['neuron_params']['V0_std'] \ - ['optimized'][i] - elif self.net_dict['V0_type'] == 'original': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['original'] - E_L, - V_std = self.net_dict['neuron_params']['V0_std']['original'] + # print(population.i0) + # print(population.n) + ngpu.SetStatus(population, {"I_e": I_e}) + + if self.net_dict["V0_type"] == "optimized": + V_rel_mean = self.net_dict["neuron_params"]["V0_mean"]["optimized"][i] - E_L + V_std = self.net_dict["neuron_params"]["V0_std"]["optimized"][i] + elif self.net_dict["V0_type"] == "original": + V_rel_mean = (self.net_dict["neuron_params"]["V0_mean"]["original"] - E_L,) + V_std = self.net_dict["neuron_params"]["V0_std"]["original"] else: - raise Exception( - 'V0_type incorrect. ' + - 'Valid options are "optimized" and "original".') - - #print("V_rel_mean", V_rel_mean) - #print("V_std", V_std) - #print("pop size: ", len(population)) - ngpu.SetStatus(population, {"V_m_rel": {"distribution":"normal", - "mu":V_rel_mean, - "sigma":V_std } } ) + raise Exception("V0_type incorrect. 
" + 'Valid options are "optimized" and "original".') + + # print("V_rel_mean", V_rel_mean) + # print("V_std", V_std) + # print("pop size: ", len(population)) + ngpu.SetStatus( + population, + { + "V_m_rel": { + "distribution": "normal", + "mu": V_rel_mean, + "sigma": V_std, + } + }, + ) self.pops.append(population) # write node ids to file if self.Rank == 0: - fn = os.path.join(self.data_path, 'population_nodeids.dat') - with open(fn, 'w+') as f: + fn = os.path.join(self.data_path, "population_nodeids.dat") + with open(fn, "w+") as f: for pop in self.pops: - f.write('{} {}\n'.format(pop[0], - pop[len(pop)-1])) + f.write("{} {}\n".format(pop[0], pop[len(pop) - 1])) def __create_recording_devices(self): - """ Creates one recording device of each kind per population. + """Creates one recording device of each kind per population. Only devices which are given in ``sim_dict['rec_dev']`` are created. """ if self.Rank == 0: - print('Creating recording devices.') + print("Creating recording devices.") - if 'spike_detector' in self.sim_dict['rec_dev']: + if "spike_detector" in self.sim_dict["rec_dev"]: if self.Rank == 0: - print(' Activating spike time recording.') - #for pop in self.pops: + print(" Activating spike time recording.") + # for pop in self.pops: ngpu.ActivateRecSpikeTimes(self.neurons, 1000) - - #self.spike_detectors = ngpu.Create('spike_detector', + + # self.spike_detectors = ngpu.Create('spike_detector', # self.num_pops) - #if 'voltmeter' in self.sim_dict['rec_dev']: + # if 'voltmeter' in self.sim_dict['rec_dev']: # if self.Rank == 0: # print(' Creating voltmeters.') # self.voltmeters = ngpu.CreateRecord('V_m_rel', @@ -395,7 +400,7 @@ def __create_recording_devices(self): # params=vm_dict) def __create_poisson_bg_input(self): - """ Creates the Poisson generators for ongoing background input if + """Creates the Poisson generators for ongoing background input if specified in ``network_params.py``. If ``poisson_input`` is ``False``, DC input is applied for compensation @@ -403,17 +408,15 @@ def __create_poisson_bg_input(self): """ if self.Rank == 0: - print('Creating Poisson generators for background input.') + print("Creating Poisson generators for background input.") - self.poisson_bg_input = ngpu.Create('poisson_generator', - self.num_pops) - rate_list = self.net_dict['bg_rate'] * self.ext_indegrees + self.poisson_bg_input = ngpu.Create("poisson_generator", self.num_pops) + rate_list = self.net_dict["bg_rate"] * self.ext_indegrees for i_pop in range(self.num_pops): - ngpu.SetStatus([self.poisson_bg_input[i_pop]], - "rate", rate_list[i_pop]) + ngpu.SetStatus([self.poisson_bg_input[i_pop]], "rate", rate_list[i_pop]) def __create_thalamic_stim_input(self): - """ Creates the thalamic neuronal population if specified in + """Creates the thalamic neuronal population if specified in ``stim_dict``. 
Thalamic neurons are of type ``parrot_neuron`` and receive input from a @@ -423,63 +426,68 @@ def __create_thalamic_stim_input(self): """ if self.Rank == 0: - print('Creating thalamic input for external stimulation.') + print("Creating thalamic input for external stimulation.") - self.thalamic_population = ngpu.Create( - 'parrot_neuron', n=self.stim_dict['num_th_neurons']) + self.thalamic_population = ngpu.Create("parrot_neuron", n=self.stim_dict["num_th_neurons"]) - self.poisson_th = ngpu.Create('poisson_generator') + self.poisson_th = ngpu.Create("poisson_generator") self.poisson_th.set( - rate=self.stim_dict['th_rate'], - start=self.stim_dict['th_start'], - stop=(self.stim_dict['th_start'] + self.stim_dict['th_duration'])) + rate=self.stim_dict["th_rate"], + start=self.stim_dict["th_start"], + stop=(self.stim_dict["th_start"] + self.stim_dict["th_duration"]), + ) def __connect_neuronal_populations(self): - """ Creates the recurrent connections between neuronal populations. """ + """Creates the recurrent connections between neuronal populations.""" if self.Rank == 0: - print('Connecting neuronal populations recurrently.') + print("Connecting neuronal populations recurrently.") for i, target_pop in enumerate(self.pops): for j, source_pop in enumerate(self.pops): - if self.num_synapses[i][j] >= 0.: + if self.num_synapses[i][j] >= 0.0: conn_dict_rec = { - 'rule': 'fixed_total_number', - 'total_num': self.num_synapses[i][j]} + "rule": "fixed_total_number", + "total_num": self.num_synapses[i][j], + } w_mean = self.weight_matrix_mean[i][j] - w_std = abs(self.weight_matrix_mean[i][j] * - self.net_dict['weight_rel_std']) - + w_std = abs(self.weight_matrix_mean[i][j] * self.net_dict["weight_rel_std"]) + if w_mean < 0: - w_min = w_mean-3.0*w_std + w_min = w_mean - 3.0 * w_std w_max = 0.0 # i_receptor = 1 else: w_min = 0.0 - w_max = w_mean+3.0*w_std + w_max = w_mean + 3.0 * w_std # i_receptor = 0 - - d_mean = self.net_dict['delay_matrix_mean'][i][j] - d_std = (self.net_dict['delay_matrix_mean'][i][j] * - self.net_dict['delay_rel_std']) + + d_mean = self.net_dict["delay_matrix_mean"][i][j] + d_std = self.net_dict["delay_matrix_mean"][i][j] * self.net_dict["delay_rel_std"] d_min = self.sim_resolution - d_max = d_mean+3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict = { - 'weight': {'distribution':'normal_clipped', - 'mu':w_mean, 'low':w_min, - 'high':w_max, - 'sigma':w_std}, - 'delay': {'distribution':'normal_clipped', - 'mu':d_mean, 'low':d_min, - 'high':d_max, - 'sigma':d_std}} - #'receptor':i_receptor} - - ngpu.Connect( - source_pop, target_pop, conn_dict_rec, syn_dict) - - #def __connect_recording_devices(self): + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + } + #'receptor':i_receptor} + + ngpu.Connect(source_pop, target_pop, conn_dict_rec, syn_dict) + + # def __connect_recording_devices(self): # """ Connects the recording devices to the microcircuit.""" # if self.Rank == 0: # print('Connecting recording devices.') @@ -492,57 +500,67 @@ def __connect_neuronal_populations(self): # conn_dict, syn_dict) def __connect_poisson_bg_input(self): - """ Connects the Poisson generators to the microcircuit.""" + """Connects the Poisson generators to the microcircuit.""" if self.Rank == 0: - print('Connecting Poisson generators for background input.') + print("Connecting Poisson generators for background input.") 
for i, target_pop in enumerate(self.pops): - conn_dict_poisson = {'rule': 'all_to_all'} + conn_dict_poisson = {"rule": "all_to_all"} syn_dict_poisson = { - 'weight': self.weight_ext, - 'delay': self.net_dict['delay_poisson']} + "weight": self.weight_ext, + "delay": self.net_dict["delay_poisson"], + } ngpu.Connect( - [self.poisson_bg_input[i]], target_pop, - conn_dict_poisson, syn_dict_poisson) + [self.poisson_bg_input[i]], + target_pop, + conn_dict_poisson, + syn_dict_poisson, + ) def __connect_thalamic_stim_input(self): - """ Connects the thalamic input to the neuronal populations.""" + """Connects the thalamic input to the neuronal populations.""" if self.Rank == 0: - print('Connecting thalamic input.') + print("Connecting thalamic input.") # connect Poisson input to thalamic population ngpu.Connect(self.poisson_th, self.thalamic_population) # connect thalamic population to neuronal populations for i, target_pop in enumerate(self.pops): - conn_dict_th = { - 'rule': 'fixed_total_number', - 'N': self.num_th_synapses[i]} - - w_mean = self.weight_th, - w_std = self.weight_th * self.net_dict['weight_rel_std'] - w_min = 0.0, - w_max = w_mean + 3.0*w_std - - d_mean = self.stim_dict['delay_th_mean'] - d_std = (self.stim_dict['delay_th_mean'] * - self.stim_dict['delay_th_rel_std']) + conn_dict_th = {"rule": "fixed_total_number", "N": self.num_th_synapses[i]} + + w_mean = (self.weight_th,) + w_std = self.weight_th * self.net_dict["weight_rel_std"] + w_min = (0.0,) + w_max = w_mean + 3.0 * w_std + + d_mean = self.stim_dict["delay_th_mean"] + d_std = self.stim_dict["delay_th_mean"] * self.stim_dict["delay_th_rel_std"] d_min = self.sim_resolution - d_max = d_mean + 3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict_th = { - 'weight': {"distribution":"normal_clipped", - "mu":w_mean, "low":w_min, - "high":w_max, - "sigma":w_std}, - 'delay': {"distribution":"normal_clipped", - "mu":d_mean, "low":d_min, - "high":d_max, - "sigma":d_std}} - - ngpu.Connect( - self.thalamic_population, target_pop, - conn_spec=conn_dict_th, syn_spec=syn_dict_th) + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + } + ngpu.Connect( + self.thalamic_population, + target_pop, + conn_spec=conn_dict_th, + syn_spec=syn_dict_th, + ) diff --git a/python/Potjans_2014_hc/network_params.py b/python/Potjans_2014_hc/network_params.py index 664f8b61d..5c4fa1aa3 100644 --- a/python/Potjans_2014_hc/network_params.py +++ b/python/Potjans_2014_hc/network_params.py @@ -33,7 +33,7 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): - """ Creates a matrix for excitatory and inhibitory values. + """Creates a matrix for excitatory and inhibitory values. 
Parameters ---------- @@ -58,109 +58,122 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): net_dict = { # factor to scale the number of neurons - 'N_scaling': 1.0, # 0.1, + "N_scaling": 1.0, # 0.1, # factor to scale the indegrees - 'K_scaling': 1.0, # 0.1, + "K_scaling": 1.0, # 0.1, # neuron model - 'neuron_model': 'iaf_psc_exp_hc', + "neuron_model": "iaf_psc_exp_hc", # names of the simulated neuronal populations - 'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'], + "populations": ["L23E", "L23I", "L4E", "L4I", "L5E", "L5I", "L6E", "L6I"], # number of neurons in the different populations (same order as # 'populations') - 'full_num_neurons': - np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), + "full_num_neurons": np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), # mean rates of the different populations in the non-scaled version of the # microcircuit (in spikes/s; same order as in 'populations'); # necessary for the scaling of the network. # The values were optained by running this PyNEST microcircuit with 12 MPI # processes and both 'N_scaling' and 'K_scaling' set to 1. - 'full_mean_rates': - np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), + "full_mean_rates": np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), # connection probabilities (the first index corresponds to the targets # and the second to the sources) - 'conn_probs': - np.array( - [[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.], - [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0., 0.0042, 0.], - [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.], - [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0., 0.1057, 0.], - [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.], - [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.], - [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], - [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]), + "conn_probs": np.array( + [ + [0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0], + [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0], + [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0], + [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0.0, 0.1057, 0.0], + [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.0], + [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.0], + [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], + [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443], + ] + ), # mean amplitude of excitatory postsynaptic potential (in mV) - 'PSP_exc_mean': 0.15, + "PSP_exc_mean": 0.15, # relative standard deviation of the weight - 'weight_rel_std': 0.1, + "weight_rel_std": 0.1, # relative inhibitory weight - 'g': -4, + "g": -4, # mean delay of excitatory connections (in ms) - 'delay_exc_mean': 1.5, + "delay_exc_mean": 1.5, # mean delay of inhibitory connections (in ms) - 'delay_inh_mean': 0.75, + "delay_inh_mean": 0.75, # relative standard deviation of the delay of excitatory and # inhibitory connections - 'delay_rel_std': 0.5, - + "delay_rel_std": 0.5, # turn Poisson input on or off (True or False) # if False: DC input is applied for compensation - 'poisson_input': True, + "poisson_input": True, # indegree of external connections to the different populations (same order # as in 'populations') - 'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), + "K_ext": np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), # rate of the Poisson generator (in spikes/s) - 'bg_rate': 8., + 
"bg_rate": 8.0, # delay from the Poisson generator to the network (in ms) - 'delay_poisson': 1.5, - + "delay_poisson": 1.5, # initial conditions for the membrane potential, options are: # 'original': uniform mean and standard deviation for all populations as # used in earlier implementations of the model # 'optimized': population-specific mean and standard deviation, allowing a # reduction of the initial activity burst in the network # (default) - 'V0_type': 'optimized', + "V0_type": "optimized", # parameters of the neuron model - 'neuron_params': { + "neuron_params": { # membrane potential average for the neurons (in mV) - 'V0_mean': {'original': -58.0, - 'optimized': [-68.28, -63.16, -63.33, -63.45, - -63.11, -61.66, -66.72, -61.43]}, + "V0_mean": { + "original": -58.0, + "optimized": [ + -68.28, + -63.16, + -63.33, + -63.45, + -63.11, + -61.66, + -66.72, + -61.43, + ], + }, # standard deviation of the average membrane potential (in mV) - 'V0_std': {'original': 10.0, - 'optimized': [5.36, 4.57, 4.74, 4.94, - 4.94, 4.55, 5.46, 4.48]}, + "V0_std": { + "original": 10.0, + "optimized": [5.36, 4.57, 4.74, 4.94, 4.94, 4.55, 5.46, 4.48], + }, # reset membrane potential of the neurons (in mV) - 'E_L': -65.0, + "E_L": -65.0, # threshold potential of the neurons (in mV) - 'V_th': -50.0, + "V_th": -50.0, # membrane potential after a spike (in mV) - 'V_reset': -65.0, + "V_reset": -65.0, # membrane capacitance (in pF) - 'C_m': 250.0, + "C_m": 250.0, # membrane time constant (in ms) - 'tau_m': 10.0, + "tau_m": 10.0, # time constant of postsynaptic currents (in ms) - 'tau_syn': 0.5, + "tau_syn": 0.5, # refractory period of the neurons after a spike (in ms) - 't_ref': 2.0}} + "t_ref": 2.0, + }, +} # derive matrix of mean PSPs, # the mean PSP of the connection from L4E to L23E is doubled PSP_matrix_mean = get_exc_inh_matrix( - net_dict['PSP_exc_mean'], - net_dict['PSP_exc_mean'] * net_dict['g'], - len(net_dict['populations'])) -PSP_matrix_mean[0, 2] = 2. * net_dict['PSP_exc_mean'] + net_dict["PSP_exc_mean"], + net_dict["PSP_exc_mean"] * net_dict["g"], + len(net_dict["populations"]), +) +PSP_matrix_mean[0, 2] = 2.0 * net_dict["PSP_exc_mean"] updated_dict = { # matrix of mean PSPs - 'PSP_matrix_mean': PSP_matrix_mean, - + "PSP_matrix_mean": PSP_matrix_mean, # matrix of mean delays - 'delay_matrix_mean': get_exc_inh_matrix( - net_dict['delay_exc_mean'], - net_dict['delay_inh_mean'], - len(net_dict['populations']))} + "delay_matrix_mean": get_exc_inh_matrix( + net_dict["delay_exc_mean"], + net_dict["delay_inh_mean"], + len(net_dict["populations"]), + ), +} net_dict.update(updated_dict) diff --git a/python/Potjans_2014_hc/run_microcircuit.py b/python/Potjans_2014_hc/run_microcircuit.py index 9d211f962..d0ab39273 100644 --- a/python/Potjans_2014_hc/run_microcircuit.py +++ b/python/Potjans_2014_hc/run_microcircuit.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. 
-from stimulus_params import stim_dict -from network_params import net_dict -from sim_params import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() ############################################################################### @@ -74,10 +77,8 @@ # The computation of spike rates discards the presimulation time to exclude # initialization artifacts. -raster_plot_interval = np.array([stim_dict['th_start'] - 100.0, - stim_dict['th_start'] + 100.0]) -firing_rates_interval = np.array([sim_dict['t_presim'], - sim_dict['t_presim'] + sim_dict['t_sim']]) +raster_plot_interval = np.array([stim_dict["th_start"] - 100.0, stim_dict["th_start"] + 100.0]) +firing_rates_interval = np.array([sim_dict["t_presim"], sim_dict["t_presim"] + sim_dict["t_sim"]]) net.evaluate(raster_plot_interval, firing_rates_interval) time_evaluate = time.time() @@ -86,25 +87,12 @@ # data evaluation and print calls. print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_evaluate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to presimulate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) + - ' Time to evaluate: {:.3f} s\n'.format( - time_evaluate - - time_simulate)) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_evaluate - time_start) # of Rank {}:\n'.format( .Rank()) + + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to presimulate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) + + " Time to evaluate: {:.3f} s\n".format(time_evaluate - time_simulate) +) diff --git a/python/Potjans_2014_hc/sim_params.py b/python/Potjans_2014_hc/sim_params.py index b7fbff797..04215e7f2 100644 --- a/python/Potjans_2014_hc/sim_params.py +++ b/python/Potjans_2014_hc/sim_params.py @@ -34,26 +34,27 @@ # The full simulation time is the sum of a presimulation time and the main # simulation time. # presimulation time (in ms) - 't_presim': 1000.0, + "t_presim": 1000.0, # simulation time (in ms) - 't_sim': 10000.0, + "t_sim": 10000.0, # resolution of the simulation (in ms) - 'sim_resolution': 0.1, + "sim_resolution": 0.1, # list of recording devices, default is 'spike_detector'. A 'voltmeter' can # be added to record membrane voltages of the neurons. Nothing will be # recorded if an empty list is given. 
- 'rec_dev': ['spike_detector'], + "rec_dev": ["spike_detector"], # path to save the output data - 'data_path': os.path.join(os.getcwd(), 'data/'), + "data_path": os.path.join(os.getcwd(), "data/"), # masterseed for NEST GPU and NumPy - 'master_seed': 12349, #55, + "master_seed": 12349, # 55, # number of threads per MPI process - 'local_num_threads': 1, + "local_num_threads": 1, # recording interval of the membrane potential (in ms) - 'rec_V_int': 1.0, + "rec_V_int": 1.0, # if True, data will be overwritten, # if False, a NESTError is raised if the files already exist - 'overwrite_files': True, + "overwrite_files": True, # print the time progress. This should only be used when the simulation # is run on a local machine. - 'print_time': False} + "print_time": False, +} diff --git a/python/Potjans_2014_hc/sim_params_norec.py b/python/Potjans_2014_hc/sim_params_norec.py index db9468144..c57c817e0 100644 --- a/python/Potjans_2014_hc/sim_params_norec.py +++ b/python/Potjans_2014_hc/sim_params_norec.py @@ -34,26 +34,27 @@ # The full simulation time is the sum of a presimulation time and the main # simulation time. # presimulation time (in ms) - 't_presim': 0.1, + "t_presim": 0.1, # simulation time (in ms) - 't_sim': 10000.0, + "t_sim": 10000.0, # resolution of the simulation (in ms) - 'sim_resolution': 0.1, + "sim_resolution": 0.1, # list of recording devices, default is 'spike_detector'. A 'voltmeter' can # be added to record membrane voltages of the neurons. Nothing will be # recorded if an empty list is given. - 'rec_dev': [], + "rec_dev": [], # path to save the output data - 'data_path': os.path.join(os.getcwd(), 'data/'), + "data_path": os.path.join(os.getcwd(), "data/"), # masterseed for NEST GPU and NumPy - 'master_seed': 12349, #55, + "master_seed": 12349, # 55, # number of threads per MPI process - 'local_num_threads': 1, + "local_num_threads": 1, # recording interval of the membrane potential (in ms) - 'rec_V_int': 1.0, + "rec_V_int": 1.0, # if True, data will be overwritten, # if False, a NESTError is raised if the files already exist - 'overwrite_files': True, + "overwrite_files": True, # print the time progress. This should only be used when the simulation # is run on a local machine. 
- 'print_time': False} + "print_time": False, +} diff --git a/python/Potjans_2014_hc/stimulus_params.py b/python/Potjans_2014_hc/stimulus_params.py index 42d397dfd..c79f53768 100644 --- a/python/Potjans_2014_hc/stimulus_params.py +++ b/python/Potjans_2014_hc/stimulus_params.py @@ -34,34 +34,33 @@ stim_dict = { # optional thalamic input # turn thalamic input on or off (True or False) - 'thalamic_input': False, + "thalamic_input": False, # start of the thalamic input (in ms) - 'th_start': 700.0, + "th_start": 700.0, # duration of the thalamic input (in ms) - 'th_duration': 10.0, + "th_duration": 10.0, # rate of the thalamic input (in spikes/s) - 'th_rate': 120.0, + "th_rate": 120.0, # number of thalamic neurons - 'num_th_neurons': 902, + "num_th_neurons": 902, # connection probabilities of the thalamus to the different populations # (same order as in 'populations' in 'net_dict') - 'conn_probs_th': - np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), + "conn_probs_th": np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), # mean amplitude of the thalamic postsynaptic potential (in mV), # standard deviation will be taken from 'net_dict' - 'PSP_th': 0.15, + "PSP_th": 0.15, # mean delay of the thalamic input (in ms) - 'delay_th_mean': 1.5, + "delay_th_mean": 1.5, # relative standard deviation of the thalamic delay (in ms) - 'delay_th_rel_std': 0.5, - + "delay_th_rel_std": 0.5, # optional DC input # turn DC input on or off (True or False) - 'dc_input': False, + "dc_input": False, # start of the DC input (in ms) - 'dc_start': 650.0, + "dc_start": 650.0, # duration of the DC input (in ms) - 'dc_dur': 100.0, + "dc_dur": 100.0, # amplitude of the DC input (in pA); final amplitude is population-specific # and will be obtained by multiplication with 'K_ext' - 'dc_amp': 0.3} + "dc_amp": 0.3, +} diff --git a/python/Potjans_2014_s/README.rst b/python/Potjans_2014_s/README.rst index 6fff2a8dc..58e7933d5 100644 --- a/python/Potjans_2014_s/README.rst +++ b/python/Potjans_2014_s/README.rst @@ -98,7 +98,7 @@ References .. [1] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking network model. Cerebral Cortex. 24(3):785–806. DOI: `10.1093/cercor/bhs358 <https://doi.org/10.1093/cercor/bhs358>`__. - + .. [2] van Albada SJ., Rowley AG., Senk J., Hopkins M., Schmidt M., Stokes AB., Lester DR., Diesmann M. and Furber SB. 2018. Performance Comparison of the Digital Neuromorphic Hardware SpiNNaker and the Neural Network Simulation Software NEST for a Full-Scale Cortical Microcircuit Model. diff --git a/python/Potjans_2014_s/eval_microcircuit_time.py b/python/Potjans_2014_s/eval_microcircuit_time.py index 3c2fa89b9..9dbb3956f 100644 --- a/python/Potjans_2014_s/eval_microcircuit_time.py +++ b/python/Potjans_2014_s/eval_microcircuit_time.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. 
-from stimulus_params import stim_dict -from network_params import net_dict -from sim_params_norec import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params_norec import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() @@ -72,22 +75,11 @@ # data evaluation and print calls. print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_simulate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to calibrate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) ) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_simulate - time_start) # of Rank {}:\n'.format( .Rank()) + + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to calibrate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) +) diff --git a/python/Potjans_2014_s/helpers.py b/python/Potjans_2014_s/helpers.py index 00f1a3f95..ac7f89b01 100644 --- a/python/Potjans_2014_s/helpers.py +++ b/python/Potjans_2014_s/helpers.py @@ -29,14 +29,17 @@ """ -from matplotlib.patches import Polygon -import matplotlib.pyplot as plt import os import sys + +import matplotlib.pyplot as plt import numpy as np -if 'DISPLAY' not in os.environ: +from matplotlib.patches import Polygon + +if "DISPLAY" not in os.environ: import matplotlib - matplotlib.use('Agg') + + matplotlib.use("Agg") def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): @@ -60,12 +63,12 @@ def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2): Matrix of synapse numbers. """ prod = np.outer(popsize1, popsize2) - num_synapses = np.log(1. - conn_probs) / np.log((prod - 1.) / prod) + num_synapses = np.log(1.0 - conn_probs) / np.log((prod - 1.0) / prod) return num_synapses def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): - """ Computes a factor to convert postsynaptic potentials to currents. + """Computes a factor to convert postsynaptic potentials to currents. The time course of the postsynaptic potential ``v`` is computed as :math: `v(t)=(i*h)(t)` @@ -108,16 +111,16 @@ def postsynaptic_potential_to_current(C_m, tau_m, tau_syn): (in pA). """ - sub = 1. / (tau_syn - tau_m) + sub = 1.0 / (tau_syn - tau_m) pre = tau_m * tau_syn / C_m * sub frac = (tau_m / tau_syn) ** sub - PSC_over_PSP = 1. / (pre * (frac**tau_m - frac**tau_syn)) + PSC_over_PSP = 1.0 / (pre * (frac**tau_m - frac**tau_syn)) return PSC_over_PSP def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): - """ Computes DC input if no Poisson input is provided to the microcircuit. 
+ """Computes DC input if no Poisson input is provided to the microcircuit. Parameters ---------- @@ -140,18 +143,19 @@ def dc_input_compensating_poisson(bg_rate, K_ext, tau_syn, PSC_ext): def adjust_weights_and_input_to_synapse_scaling( - full_num_neurons, - full_num_synapses, - K_scaling, - mean_PSC_matrix, - PSC_ext, - tau_syn, - full_mean_rates, - DC_amp, - poisson_input, - bg_rate, - K_ext): - """ Adjusts weights and external input to scaling of indegrees. + full_num_neurons, + full_num_synapses, + K_scaling, + mean_PSC_matrix, + PSC_ext, + tau_syn, + full_mean_rates, + DC_amp, + poisson_input, + bg_rate, + K_ext, +): + """Adjusts weights and external input to scaling of indegrees. The recurrent and external weights are adjusted to the scaling of the indegrees. Extra DC input is added to compensate for the @@ -196,22 +200,19 @@ def adjust_weights_and_input_to_synapse_scaling( PSC_ext_new = PSC_ext / np.sqrt(K_scaling) # recurrent input of full network - indegree_matrix = \ - full_num_synapses / full_num_neurons[:, np.newaxis] - input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, - axis=1) + indegree_matrix = full_num_synapses / full_num_neurons[:, np.newaxis] + input_rec = np.sum(mean_PSC_matrix * indegree_matrix * full_mean_rates, axis=1) - DC_amp_new = DC_amp \ - + 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_rec + DC_amp_new = DC_amp + 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_rec if poisson_input: input_ext = PSC_ext * K_ext * bg_rate - DC_amp_new += 0.001 * tau_syn * (1. - np.sqrt(K_scaling)) * input_ext + DC_amp_new += 0.001 * tau_syn * (1.0 - np.sqrt(K_scaling)) * input_ext return PSC_matrix_new, PSC_ext_new, DC_amp_new def plot_raster(path, name, begin, end, N_scaling): - """ Creates a spike raster plot of the network activity. + """Creates a spike raster plot of the network activity. Parameters ----------- @@ -232,34 +233,33 @@ def plot_raster(path, name, begin, end, N_scaling): """ fs = 18 # fontsize - ylabels = ['L2/3', 'L4', 'L5', 'L6'] - color_list = np.tile(['#595289', '#af143c'], 4) + ylabels = ["L2/3", "L4", "L5", "L6"] + color_list = np.tile(["#595289", "#af143c"], 4) sd_names, node_ids, data = __load_spike_times(path, name, begin, end) last_node_id = node_ids[-1, -1] mod_node_ids = np.abs(node_ids - last_node_id) + 1 - label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / - 2. for i in np.arange(0, 8, 2)] + label_pos = [(mod_node_ids[i, 0] + mod_node_ids[i + 1, 1]) / 2.0 for i in np.arange(0, 8, 2)] stp = 1 if N_scaling > 0.1: - stp = int(10. * N_scaling) - print(' Only spikes of neurons in steps of {} are shown.'.format(stp)) + stp = int(10.0 * N_scaling) + print(" Only spikes of neurons in steps of {} are shown.".format(stp)) plt.figure(figsize=(8, 6)) for i, n in enumerate(sd_names): - times = data[i]['time_ms'] - neurons = np.abs(data[i]['sender'] - last_node_id) + 1 - plt.plot(times[::stp], neurons[::stp], '.', color=color_list[i]) - plt.xlabel('time [ms]', fontsize=fs) + times = data[i]["time_ms"] + neurons = np.abs(data[i]["sender"] - last_node_id) + 1 + plt.plot(times[::stp], neurons[::stp], ".", color=color_list[i]) + plt.xlabel("time [ms]", fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(label_pos, ylabels, fontsize=fs) - plt.savefig(os.path.join(path, 'raster_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "raster_plot.png"), dpi=300) def firing_rates(path, name, begin, end): - """ Computes mean and standard deviation of firing rates per population. 
+ """Computes mean and standard deviation of firing rates per population. The firing rate of each neuron in each population is computed and stored in a .dat file in the directory of the spike detectors. The mean firing @@ -285,23 +285,21 @@ def firing_rates(path, name, begin, end): all_mean_rates = [] all_std_rates = [] for i, n in enumerate(sd_names): - senders = data[i]['sender'] + senders = data[i]["sender"] # 1 more bin than node ids per population bins = np.arange(node_ids[i, 0], node_ids[i, 1] + 2) spike_count_per_neuron, _ = np.histogram(senders, bins=bins) - rate_per_neuron = spike_count_per_neuron * 1000. / (end - begin) - np.savetxt(os.path.join(path, ('rate' + str(i) + '.dat')), - rate_per_neuron) + rate_per_neuron = spike_count_per_neuron * 1000.0 / (end - begin) + np.savetxt(os.path.join(path, ("rate" + str(i) + ".dat")), rate_per_neuron) # zeros are included all_mean_rates.append(np.mean(rate_per_neuron)) all_std_rates.append(np.std(rate_per_neuron)) - print('Mean rates: {} spikes/s'.format(np.around(all_mean_rates, decimals=3))) - print('Standard deviation of rates: {} spikes/s'.format( - np.around(all_std_rates, decimals=3))) + print("Mean rates: {} spikes/s".format(np.around(all_mean_rates, decimals=3))) + print("Standard deviation of rates: {} spikes/s".format(np.around(all_std_rates, decimals=3))) def boxplot(path, populations): - """ Creates a boxblot of the firing rates of all populations. + """Creates a boxblot of the firing rates of all populations. To create the boxplot, the firing rates of each neuron in each population need to be computed with the function ``firing_rate()``. @@ -319,29 +317,36 @@ def boxplot(path, populations): """ fs = 18 - pop_names = [string.replace('23', '2/3') for string in populations] + pop_names = [string.replace("23", "2/3") for string in populations] label_pos = list(range(len(populations), 0, -1)) - color_list = ['#af143c', '#595289'] - medianprops = dict(linestyle='-', linewidth=2.5, color='black') - meanprops = dict(linestyle='--', linewidth=2.5, color='lightgray') + color_list = ["#af143c", "#595289"] + medianprops = dict(linestyle="-", linewidth=2.5, color="black") + meanprops = dict(linestyle="--", linewidth=2.5, color="lightgray") rates_per_neuron_rev = [] for i in np.arange(len(populations))[::-1]: - rates_per_neuron_rev.append( - np.loadtxt(os.path.join(path, ('rate' + str(i) + '.dat')))) + rates_per_neuron_rev.append(np.loadtxt(os.path.join(path, ("rate" + str(i) + ".dat")))) plt.figure(figsize=(8, 6)) - bp = plt.boxplot(rates_per_neuron_rev, 0, 'rs', 0, medianprops=medianprops, - meanprops=meanprops, meanline=True, showmeans=True) - plt.setp(bp['boxes'], color='black') - plt.setp(bp['whiskers'], color='black') - plt.setp(bp['fliers'], color='red', marker='+') + bp = plt.boxplot( + rates_per_neuron_rev, + 0, + "rs", + 0, + medianprops=medianprops, + meanprops=meanprops, + meanline=True, + showmeans=True, + ) + plt.setp(bp["boxes"], color="black") + plt.setp(bp["whiskers"], color="black") + plt.setp(bp["fliers"], color="red", marker="+") # boxcolors for i in np.arange(len(populations)): boxX = [] boxY = [] - box = bp['boxes'][i] + box = bp["boxes"][i] for j in list(range(5)): boxX.append(box.get_xdata()[j]) boxY.append(box.get_ydata()[j]) @@ -349,14 +354,14 @@ def boxplot(path, populations): k = i % 2 boxPolygon = Polygon(boxCoords, facecolor=color_list[k]) plt.gca().add_patch(boxPolygon) - plt.xlabel('firing rate [spikes/s]', fontsize=fs) + plt.xlabel("firing rate [spikes/s]", fontsize=fs) plt.yticks(label_pos, pop_names, 
fontsize=fs) plt.xticks(fontsize=fs) - plt.savefig(os.path.join(path, 'box_plot.png'), dpi=300) + plt.savefig(os.path.join(path, "box_plot.png"), dpi=300) def __gather_metadata(path): - """ Reads first and last ids of + """Reads first and last ids of neurons in each population. Parameters @@ -371,16 +376,16 @@ def __gather_metadata(path): """ # load node IDs - node_idfile = open(path + 'population_nodeids.dat', 'r') + node_idfile = open(path + "population_nodeids.dat", "r") node_ids = [] for l in node_idfile: node_ids.append(l.split()) - node_ids = np.array(node_ids, dtype='i4') + node_ids = np.array(node_ids, dtype="i4") return node_ids def __load_spike_times(path, name, begin, end): - """ Loads spike times of each spike detector. + """Loads spike times of each spike detector. Parameters ---------- @@ -402,20 +407,19 @@ def __load_spike_times(path, name, begin, end): """ node_ids = __gather_metadata(path) data = {} - dtype = {'names': ('sender', 'time_ms'), # as in header - 'formats': ('i4', 'f8')} - #print(node_ids) + dtype = {"names": ("sender", "time_ms"), "formats": ("i4", "f8")} # as in header + # print(node_ids) sd_names = {} - + for i_pop in range(8): - fn = os.path.join(path, 'spike_times_' + str(i_pop) + '.dat') + fn = os.path.join(path, "spike_times_" + str(i_pop) + ".dat") data_i_raw = np.loadtxt(fn, skiprows=1, dtype=dtype) - data_i_raw = np.sort(data_i_raw, order='time_ms') + data_i_raw = np.sort(data_i_raw, order="time_ms") # begin and end are included if they exist - low = np.searchsorted(data_i_raw['time_ms'], v=begin, side='left') - high = np.searchsorted(data_i_raw['time_ms'], v=end, side='right') + low = np.searchsorted(data_i_raw["time_ms"], v=begin, side="left") + high = np.searchsorted(data_i_raw["time_ms"], v=end, side="right") data[i_pop] = data_i_raw[low:high] - sd_names[i_pop] = 'spike_times_' + str(i_pop) + sd_names[i_pop] = "spike_times_" + str(i_pop) return sd_names, node_ids, data diff --git a/python/Potjans_2014_s/network.py b/python/Potjans_2014_s/network.py index 1d39ac735..c3c62901d 100644 --- a/python/Potjans_2014_s/network.py +++ b/python/Potjans_2014_s/network.py @@ -28,13 +28,14 @@ """ import os -import numpy as np -import nestgpu as ngpu + import helpers +import nestgpu as ngpu +import numpy as np class Network: - """ Provides functions to setup NEST GPU, to create and connect all nodes + """Provides functions to setup NEST GPU, to create and connect all nodes of the network, to simulate, and to evaluate the resulting spike data. Instantiating a Network object derives dependent parameters and already @@ -61,17 +62,16 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.Rank = 0 # data directory - self.data_path = sim_dict['data_path'] + self.data_path = sim_dict["data_path"] if self.Rank == 0: if os.path.isdir(self.data_path): - message = ' Directory already existed.' - if self.sim_dict['overwrite_files']: - message += ' Old data will be overwritten.' + message = " Directory already existed." + if self.sim_dict["overwrite_files"]: + message += " Old data will be overwritten." else: os.mkdir(self.data_path) - message = ' Directory has been created.' - print('Data will be written to: {}\n{}\n'.format(self.data_path, - message)) + message = " Directory has been created." 
+ print("Data will be written to: {}\n{}\n".format(self.data_path, message)) # derive parameters based on input dictionaries self.__derive_parameters() @@ -80,23 +80,23 @@ def __init__(self, sim_dict, net_dict, stim_dict=None): self.__setup_ngpu() def create(self): - """ Creates all network nodes. + """Creates all network nodes. Neuronal populations and recording and stimulating devices are created. """ self.__create_neuronal_populations() - if len(self.sim_dict['rec_dev']) > 0: + if len(self.sim_dict["rec_dev"]) > 0: self.__create_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__create_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__create_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__create_dc_stim_input() def connect(self): - """ Connects the network. + """Connects the network. Recurrent connections among neurons of the neuronal populations are established, and recording and stimulating devices are connected. @@ -114,20 +114,20 @@ def connect(self): """ self.__connect_neuronal_populations() - #if len(self.sim_dict['rec_dev']) > 0: + # if len(self.sim_dict['rec_dev']) > 0: # self.__connect_recording_devices() - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: self.__connect_poisson_bg_input() - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: self.__connect_thalamic_stim_input() - if self.stim_dict['dc_input']: + if self.stim_dict["dc_input"]: self.__connect_dc_stim_input() - #ngpu.Prepare() - #ngpu.Cleanup() + # ngpu.Prepare() + # ngpu.Cleanup() def simulate(self, t_sim): - """ Simulates the microcircuit. + """Simulates the microcircuit. Parameters ---------- @@ -136,12 +136,12 @@ def simulate(self, t_sim): """ if self.Rank == 0: - print('Simulating {} ms.'.format(t_sim)) + print("Simulating {} ms.".format(t_sim)) ngpu.Simulate(t_sim) def evaluate(self, raster_plot_interval, firing_rates_interval): - """ Displays simulation results. + """Displays simulation results. Creates a spike raster plot. 
Calculates the firing rate of each population and displays them as a @@ -168,85 +168,95 @@ def evaluate(self, raster_plot_interval, firing_rates_interval): spike_times_list = ngpu.GetRecSpikeTimes(population) for i_neur in range(len(population)): spike_times = spike_times_list[i_neur] - if (len(spike_times) != 0): + if len(spike_times) != 0: # print("i_pop:", i_pop, " i_neur:", i_neur, " n_spikes:", # len(spike_times)) for t in spike_times: data.append([population[i_neur], t]) arr = np.array(data) - fn = os.path.join(self.data_path, 'spike_times_' + str(i_pop) + - '.dat') - fmt='%d\t%.3f' - np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", - comments='') + fn = os.path.join(self.data_path, "spike_times_" + str(i_pop) + ".dat") + fmt = "%d\t%.3f" + np.savetxt(fn, arr, fmt=fmt, header="sender time_ms", comments="") if self.Rank == 0: - print('Interval to plot spikes: {} ms'.format(raster_plot_interval)) + print("Interval to plot spikes: {} ms".format(raster_plot_interval)) helpers.plot_raster( self.data_path, - 'spike_detector', + "spike_detector", raster_plot_interval[0], raster_plot_interval[1], - self.net_dict['N_scaling']) + self.net_dict["N_scaling"], + ) - print('Interval to compute firing rates: {} ms'.format( - firing_rates_interval)) + print("Interval to compute firing rates: {} ms".format(firing_rates_interval)) helpers.firing_rates( - self.data_path, 'spike_detector', - firing_rates_interval[0], firing_rates_interval[1]) - helpers.boxplot(self.data_path, self.net_dict['populations']) + self.data_path, + "spike_detector", + firing_rates_interval[0], + firing_rates_interval[1], + ) + helpers.boxplot(self.data_path, self.net_dict["populations"]) def __derive_parameters(self): """ Derives and adjusts parameters and stores them as class attributes. 
""" - self.num_pops = len(self.net_dict['populations']) + self.num_pops = len(self.net_dict["populations"]) # total number of synapses between neuronal populations before scaling full_num_synapses = helpers.num_synapses_from_conn_probs( - self.net_dict['conn_probs'], - self.net_dict['full_num_neurons'], - self.net_dict['full_num_neurons']) + self.net_dict["conn_probs"], + self.net_dict["full_num_neurons"], + self.net_dict["full_num_neurons"], + ) # scaled numbers of neurons and synapses - self.num_neurons = np.round((self.net_dict['full_num_neurons'] * - self.net_dict['N_scaling'])).astype(int) - self.num_synapses = np.round((full_num_synapses * - self.net_dict['N_scaling'] * - self.net_dict['K_scaling'])).astype(int) - self.ext_indegrees = np.round((self.net_dict['K_ext'] * - self.net_dict['K_scaling'])).astype(int) + self.num_neurons = np.round((self.net_dict["full_num_neurons"] * self.net_dict["N_scaling"])).astype(int) + self.num_synapses = np.round( + (full_num_synapses * self.net_dict["N_scaling"] * self.net_dict["K_scaling"]) + ).astype(int) + self.ext_indegrees = np.round((self.net_dict["K_ext"] * self.net_dict["K_scaling"])).astype(int) # conversion from PSPs to PSCs PSC_over_PSP = helpers.postsynaptic_potential_to_current( - self.net_dict['neuron_params']['C_m'], - self.net_dict['neuron_params']['tau_m'], - self.net_dict['neuron_params']['tau_syn']) - PSC_matrix_mean = self.net_dict['PSP_matrix_mean'] * PSC_over_PSP - PSC_ext = self.net_dict['PSP_exc_mean'] * PSC_over_PSP + self.net_dict["neuron_params"]["C_m"], + self.net_dict["neuron_params"]["tau_m"], + self.net_dict["neuron_params"]["tau_syn"], + ) + PSC_matrix_mean = self.net_dict["PSP_matrix_mean"] * PSC_over_PSP + PSC_ext = self.net_dict["PSP_exc_mean"] * PSC_over_PSP # DC input compensates for potentially missing Poisson input - if self.net_dict['poisson_input']: + if self.net_dict["poisson_input"]: DC_amp = np.zeros(self.num_pops) else: if self.Rank == 0: - print('DC input compensates for missing Poisson input.\n') + print("DC input compensates for missing Poisson input.\n") DC_amp = helpers.dc_input_compensating_poisson( - self.net_dict['bg_rate'], self.net_dict['K_ext'], - self.net_dict['neuron_params']['tau_syn'], - PSC_ext) + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + self.net_dict["neuron_params"]["tau_syn"], + PSC_ext, + ) # adjust weights and DC amplitude if the indegree is scaled - if self.net_dict['K_scaling'] != 1: - PSC_matrix_mean, PSC_ext, DC_amp = \ - helpers.adjust_weights_and_input_to_synapse_scaling( - self.net_dict['full_num_neurons'], - full_num_synapses, self.net_dict['K_scaling'], - PSC_matrix_mean, PSC_ext, - self.net_dict['neuron_params']['tau_syn'], - self.net_dict['full_mean_rates'], - DC_amp, - self.net_dict['poisson_input'], - self.net_dict['bg_rate'], self.net_dict['K_ext']) + if self.net_dict["K_scaling"] != 1: + ( + PSC_matrix_mean, + PSC_ext, + DC_amp, + ) = helpers.adjust_weights_and_input_to_synapse_scaling( + self.net_dict["full_num_neurons"], + full_num_synapses, + self.net_dict["K_scaling"], + PSC_matrix_mean, + PSC_ext, + self.net_dict["neuron_params"]["tau_syn"], + self.net_dict["full_mean_rates"], + DC_amp, + self.net_dict["poisson_input"], + self.net_dict["bg_rate"], + self.net_dict["K_ext"], + ) # store final parameters as class attributes self.weight_matrix_mean = PSC_matrix_mean @@ -254,44 +264,39 @@ def __derive_parameters(self): self.DC_amp = DC_amp # thalamic input - if self.stim_dict['thalamic_input']: + if self.stim_dict["thalamic_input"]: num_th_synapses = 
helpers.num_synapses_from_conn_probs( - self.stim_dict['conn_probs_th'], - self.stim_dict['num_th_neurons'], - self.net_dict['full_num_neurons'])[0] - self.weight_th = self.stim_dict['PSP_th'] * PSC_over_PSP - if self.net_dict['K_scaling'] != 1: - num_th_synapses *= self.net_dict['K_scaling'] - self.weight_th /= np.sqrt(self.net_dict['K_scaling']) + self.stim_dict["conn_probs_th"], + self.stim_dict["num_th_neurons"], + self.net_dict["full_num_neurons"], + )[0] + self.weight_th = self.stim_dict["PSP_th"] * PSC_over_PSP + if self.net_dict["K_scaling"] != 1: + num_th_synapses *= self.net_dict["K_scaling"] + self.weight_th /= np.sqrt(self.net_dict["K_scaling"]) self.num_th_synapses = np.round(num_th_synapses).astype(int) if self.Rank == 0: - message = '' - if self.net_dict['N_scaling'] != 1: - message += \ - 'Neuron numbers are scaled by a factor of {:.3f}.\n'.format( - self.net_dict['N_scaling']) - if self.net_dict['K_scaling'] != 1: - message += \ - 'Indegrees are scaled by a factor of {:.3f}.'.format( - self.net_dict['K_scaling']) - message += '\n Weights and DC input are adjusted to compensate.\n' + message = "" + if self.net_dict["N_scaling"] != 1: + message += "Neuron numbers are scaled by a factor of {:.3f}.\n".format(self.net_dict["N_scaling"]) + if self.net_dict["K_scaling"] != 1: + message += "Indegrees are scaled by a factor of {:.3f}.".format(self.net_dict["K_scaling"]) + message += "\n Weights and DC input are adjusted to compensate.\n" print(message) def __setup_ngpu(self): - """ Initializes NEST GPU. - - """ + """Initializes NEST GPU.""" # set seeds for random number generation - master_seed = self.sim_dict['master_seed'] + master_seed = self.sim_dict["master_seed"] ngpu.SetRandomSeed(master_seed) - ngpu.SetKernelStatus({'print_time': self.sim_dict['print_time']}) - self.sim_resolution = self.sim_dict['sim_resolution'] + ngpu.SetKernelStatus({"print_time": self.sim_dict["print_time"]}) + self.sim_resolution = self.sim_dict["sim_resolution"] def __create_neuronal_populations(self): - """ Creates the neuronal populations. + """Creates the neuronal populations. The neuronal populations are created and the parameters are assigned to them. The initial membrane potential of the neurons is drawn from @@ -300,78 +305,83 @@ def __create_neuronal_populations(self): The first and last neuron id of each population is written to file. 
""" if self.Rank == 0: - print('Creating neuronal populations.') + print("Creating neuronal populations.") self.pops = [] for i in np.arange(self.num_pops): - population = ngpu.Create(self.net_dict['neuron_model'], - self.num_neurons[i]) - - tau_syn_ex=self.net_dict['neuron_params']['tau_syn'] - tau_syn_in=self.net_dict['neuron_params']['tau_syn'] - E_L=self.net_dict['neuron_params']['E_L'] - V_th=self.net_dict['neuron_params']['V_th'] - V_reset=self.net_dict['neuron_params']['V_reset'] - t_ref=self.net_dict['neuron_params']['t_ref'] - I_e=self.DC_amp[i] - - ngpu.SetStatus(population, {"tau_ex":tau_syn_ex, - "tau_in":tau_syn_in, - "E_L":E_L, - "Theta_rel":V_th - E_L, - "V_reset_rel":V_reset - E_L, - "t_ref":t_ref, - "I_e":I_e}) - - if self.net_dict['V0_type'] == 'optimized': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['optimized'][i] - E_L - V_std = self.net_dict['neuron_params']['V0_std'] \ - ['optimized'][i] - elif self.net_dict['V0_type'] == 'original': - V_rel_mean = self.net_dict['neuron_params']['V0_mean'] \ - ['original'] - E_L, - V_std = self.net_dict['neuron_params']['V0_std']['original'] + population = ngpu.Create(self.net_dict["neuron_model"], self.num_neurons[i]) + + tau_syn_ex = self.net_dict["neuron_params"]["tau_syn"] + tau_syn_in = self.net_dict["neuron_params"]["tau_syn"] + E_L = self.net_dict["neuron_params"]["E_L"] + V_th = self.net_dict["neuron_params"]["V_th"] + V_reset = self.net_dict["neuron_params"]["V_reset"] + t_ref = self.net_dict["neuron_params"]["t_ref"] + I_e = self.DC_amp[i] + + ngpu.SetStatus( + population, + { + "tau_ex": tau_syn_ex, + "tau_in": tau_syn_in, + "E_L": E_L, + "Theta_rel": V_th - E_L, + "V_reset_rel": V_reset - E_L, + "t_ref": t_ref, + "I_e": I_e, + }, + ) + + if self.net_dict["V0_type"] == "optimized": + V_rel_mean = self.net_dict["neuron_params"]["V0_mean"]["optimized"][i] - E_L + V_std = self.net_dict["neuron_params"]["V0_std"]["optimized"][i] + elif self.net_dict["V0_type"] == "original": + V_rel_mean = (self.net_dict["neuron_params"]["V0_mean"]["original"] - E_L,) + V_std = self.net_dict["neuron_params"]["V0_std"]["original"] else: - raise Exception( - 'V0_type incorrect. ' + - 'Valid options are "optimized" and "original".') - - #print("V_rel_mean", V_rel_mean) - #print("V_std", V_std) - #print("pop size: ", len(population)) - ngpu.SetStatus(population, {"V_m_rel": {"distribution":"normal", - "mu":V_rel_mean, - "sigma":V_std } } ) + raise Exception("V0_type incorrect. " + 'Valid options are "optimized" and "original".') + + # print("V_rel_mean", V_rel_mean) + # print("V_std", V_std) + # print("pop size: ", len(population)) + ngpu.SetStatus( + population, + { + "V_m_rel": { + "distribution": "normal", + "mu": V_rel_mean, + "sigma": V_std, + } + }, + ) self.pops.append(population) # write node ids to file if self.Rank == 0: - fn = os.path.join(self.data_path, 'population_nodeids.dat') - with open(fn, 'w+') as f: + fn = os.path.join(self.data_path, "population_nodeids.dat") + with open(fn, "w+") as f: for pop in self.pops: - f.write('{} {}\n'.format(pop[0], - pop[len(pop)-1])) + f.write("{} {}\n".format(pop[0], pop[len(pop) - 1])) def __create_recording_devices(self): - """ Creates one recording device of each kind per population. + """Creates one recording device of each kind per population. Only devices which are given in ``sim_dict['rec_dev']`` are created. 
""" if self.Rank == 0: - print('Creating recording devices.') + print("Creating recording devices.") - if 'spike_detector' in self.sim_dict['rec_dev']: + if "spike_detector" in self.sim_dict["rec_dev"]: if self.Rank == 0: - print(' Activating spike time recording.') + print(" Activating spike time recording.") for pop in self.pops: ngpu.ActivateRecSpikeTimes(pop, 1000) - - #self.spike_detectors = ngpu.Create('spike_detector', + + # self.spike_detectors = ngpu.Create('spike_detector', # self.num_pops) - #if 'voltmeter' in self.sim_dict['rec_dev']: + # if 'voltmeter' in self.sim_dict['rec_dev']: # if self.Rank == 0: # print(' Creating voltmeters.') # self.voltmeters = ngpu.CreateRecord('V_m_rel', @@ -379,7 +389,7 @@ def __create_recording_devices(self): # params=vm_dict) def __create_poisson_bg_input(self): - """ Creates the Poisson generators for ongoing background input if + """Creates the Poisson generators for ongoing background input if specified in ``network_params.py``. If ``poisson_input`` is ``False``, DC input is applied for compensation @@ -387,17 +397,15 @@ def __create_poisson_bg_input(self): """ if self.Rank == 0: - print('Creating Poisson generators for background input.') + print("Creating Poisson generators for background input.") - self.poisson_bg_input = ngpu.Create('poisson_generator', - self.num_pops) - rate_list = self.net_dict['bg_rate'] * self.ext_indegrees + self.poisson_bg_input = ngpu.Create("poisson_generator", self.num_pops) + rate_list = self.net_dict["bg_rate"] * self.ext_indegrees for i_pop in range(self.num_pops): - ngpu.SetStatus([self.poisson_bg_input[i_pop]], - "rate", rate_list[i_pop]) + ngpu.SetStatus([self.poisson_bg_input[i_pop]], "rate", rate_list[i_pop]) def __create_thalamic_stim_input(self): - """ Creates the thalamic neuronal population if specified in + """Creates the thalamic neuronal population if specified in ``stim_dict``. Thalamic neurons are of type ``parrot_neuron`` and receive input from a @@ -407,63 +415,68 @@ def __create_thalamic_stim_input(self): """ if self.Rank == 0: - print('Creating thalamic input for external stimulation.') + print("Creating thalamic input for external stimulation.") - self.thalamic_population = ngpu.Create( - 'parrot_neuron', n=self.stim_dict['num_th_neurons']) + self.thalamic_population = ngpu.Create("parrot_neuron", n=self.stim_dict["num_th_neurons"]) - self.poisson_th = ngpu.Create('poisson_generator') + self.poisson_th = ngpu.Create("poisson_generator") self.poisson_th.set( - rate=self.stim_dict['th_rate'], - start=self.stim_dict['th_start'], - stop=(self.stim_dict['th_start'] + self.stim_dict['th_duration'])) + rate=self.stim_dict["th_rate"], + start=self.stim_dict["th_start"], + stop=(self.stim_dict["th_start"] + self.stim_dict["th_duration"]), + ) def __connect_neuronal_populations(self): - """ Creates the recurrent connections between neuronal populations. 
""" + """Creates the recurrent connections between neuronal populations.""" if self.Rank == 0: - print('Connecting neuronal populations recurrently.') + print("Connecting neuronal populations recurrently.") for i, target_pop in enumerate(self.pops): for j, source_pop in enumerate(self.pops): - if self.num_synapses[i][j] >= 0.: + if self.num_synapses[i][j] >= 0.0: conn_dict_rec = { - 'rule': 'fixed_total_number', - 'total_num': self.num_synapses[i][j]} + "rule": "fixed_total_number", + "total_num": self.num_synapses[i][j], + } w_mean = self.weight_matrix_mean[i][j] - w_std = abs(self.weight_matrix_mean[i][j] * - self.net_dict['weight_rel_std']) - + w_std = abs(self.weight_matrix_mean[i][j] * self.net_dict["weight_rel_std"]) + if w_mean < 0: - w_min = w_mean-3.0*w_std + w_min = w_mean - 3.0 * w_std w_max = 0.0 i_receptor = 1 else: w_min = 0.0 - w_max = w_mean+3.0*w_std + w_max = w_mean + 3.0 * w_std i_receptor = 0 - - d_mean = self.net_dict['delay_matrix_mean'][i][j] - d_std = (self.net_dict['delay_matrix_mean'][i][j] * - self.net_dict['delay_rel_std']) + + d_mean = self.net_dict["delay_matrix_mean"][i][j] + d_std = self.net_dict["delay_matrix_mean"][i][j] * self.net_dict["delay_rel_std"] d_min = self.sim_resolution - d_max = d_mean+3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict = { - 'weight': {'distribution':'normal_clipped', - 'mu':w_mean, 'low':w_min, - 'high':w_max, - 'sigma':w_std}, - 'delay': {'distribution':'normal_clipped', - 'mu':d_mean, 'low':d_min, - 'high':d_max, - 'sigma':d_std}, - 'receptor':i_receptor} - - ngpu.Connect( - source_pop, target_pop, conn_dict_rec, syn_dict) - - #def __connect_recording_devices(self): + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + "receptor": i_receptor, + } + + ngpu.Connect(source_pop, target_pop, conn_dict_rec, syn_dict) + + # def __connect_recording_devices(self): # """ Connects the recording devices to the microcircuit.""" # if self.Rank == 0: # print('Connecting recording devices.') @@ -476,57 +489,67 @@ def __connect_neuronal_populations(self): # conn_dict, syn_dict) def __connect_poisson_bg_input(self): - """ Connects the Poisson generators to the microcircuit.""" + """Connects the Poisson generators to the microcircuit.""" if self.Rank == 0: - print('Connecting Poisson generators for background input.') + print("Connecting Poisson generators for background input.") for i, target_pop in enumerate(self.pops): - conn_dict_poisson = {'rule': 'all_to_all'} + conn_dict_poisson = {"rule": "all_to_all"} syn_dict_poisson = { - 'weight': self.weight_ext, - 'delay': self.net_dict['delay_poisson']} + "weight": self.weight_ext, + "delay": self.net_dict["delay_poisson"], + } ngpu.Connect( - [self.poisson_bg_input[i]], target_pop, - conn_dict_poisson, syn_dict_poisson) + [self.poisson_bg_input[i]], + target_pop, + conn_dict_poisson, + syn_dict_poisson, + ) def __connect_thalamic_stim_input(self): - """ Connects the thalamic input to the neuronal populations.""" + """Connects the thalamic input to the neuronal populations.""" if self.Rank == 0: - print('Connecting thalamic input.') + print("Connecting thalamic input.") # connect Poisson input to thalamic population ngpu.Connect(self.poisson_th, self.thalamic_population) # connect thalamic population to neuronal populations for i, target_pop in enumerate(self.pops): - conn_dict_th = { - 'rule': 
'fixed_total_number', - 'N': self.num_th_synapses[i]} - - w_mean = self.weight_th, - w_std = self.weight_th * self.net_dict['weight_rel_std'] - w_min = 0.0, - w_max = w_mean + 3.0*w_std - - d_mean = self.stim_dict['delay_th_mean'] - d_std = (self.stim_dict['delay_th_mean'] * - self.stim_dict['delay_th_rel_std']) + conn_dict_th = {"rule": "fixed_total_number", "N": self.num_th_synapses[i]} + + w_mean = self.weight_th + w_std = self.weight_th * self.net_dict["weight_rel_std"] + w_min = 0.0 + w_max = w_mean + 3.0 * w_std + + d_mean = self.stim_dict["delay_th_mean"] + d_std = self.stim_dict["delay_th_mean"] * self.stim_dict["delay_th_rel_std"] d_min = self.sim_resolution - d_max = d_mean + 3.0*d_std + d_max = d_mean + 3.0 * d_std syn_dict_th = { - 'weight': {"distribution":"normal_clipped", - "mu":w_mean, "low":w_min, - "high":w_max, - "sigma":w_std}, - 'delay': {"distribution":"normal_clipped", - "mu":d_mean, "low":d_min, - "high":d_max, - "sigma":d_std}} - - ngpu.Connect( - self.thalamic_population, target_pop, - conn_spec=conn_dict_th, syn_spec=syn_dict_th) + "weight": { + "distribution": "normal_clipped", + "mu": w_mean, + "low": w_min, + "high": w_max, + "sigma": w_std, + }, + "delay": { + "distribution": "normal_clipped", + "mu": d_mean, + "low": d_min, + "high": d_max, + "sigma": d_std, + }, + } + ngpu.Connect( + self.thalamic_population, + target_pop, + conn_spec=conn_dict_th, + syn_spec=syn_dict_th, + ) diff --git a/python/Potjans_2014_s/network_params.py b/python/Potjans_2014_s/network_params.py index a060e8205..c4e36e757 100644 --- a/python/Potjans_2014_s/network_params.py +++ b/python/Potjans_2014_s/network_params.py @@ -33,7 +33,7 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): - """ Creates a matrix for excitatory and inhibitory values. + """Creates a matrix for excitatory and inhibitory values. Parameters ---------- @@ -58,109 +58,122 @@ def get_exc_inh_matrix(val_exc, val_inh, num_pops): net_dict = { # factor to scale the number of neurons - 'N_scaling': 1.0, # 0.1, + "N_scaling": 1.0, # 0.1, # factor to scale the indegrees - 'K_scaling': 1.0, # 0.1, + "K_scaling": 1.0, # 0.1, # neuron model - 'neuron_model': 'iaf_psc_exp', + "neuron_model": "iaf_psc_exp", # names of the simulated neuronal populations - 'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'], + "populations": ["L23E", "L23I", "L4E", "L4I", "L5E", "L5I", "L6E", "L6I"], # number of neurons in the different populations (same order as # 'populations') - 'full_num_neurons': - np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), + "full_num_neurons": np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), # mean rates of the different populations in the non-scaled version of the # microcircuit (in spikes/s; same order as in 'populations'); # necessary for the scaling of the network. # The values were optained by running this PyNEST microcircuit with 12 MPI # processes and both 'N_scaling' and 'K_scaling' set to 1.
- 'full_mean_rates': - np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), + "full_mean_rates": np.array([0.943, 3.026, 4.368, 5.882, 7.733, 8.664, 1.096, 7.851]), # connection probabilities (the first index corresponds to the targets # and the second to the sources) - 'conn_probs': - np.array( - [[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.], - [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0., 0.0042, 0.], - [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.], - [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0., 0.1057, 0.], - [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.], - [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.], - [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], - [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]), + "conn_probs": np.array( + [ + [0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0], + [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0], + [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0], + [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0.0, 0.1057, 0.0], + [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.0], + [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.0], + [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252], + [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443], + ] + ), # mean amplitude of excitatory postsynaptic potential (in mV) - 'PSP_exc_mean': 0.15, + "PSP_exc_mean": 0.15, # relative standard deviation of the weight - 'weight_rel_std': 0.1, + "weight_rel_std": 0.1, # relative inhibitory weight - 'g': -4, + "g": -4, # mean delay of excitatory connections (in ms) - 'delay_exc_mean': 1.5, + "delay_exc_mean": 1.5, # mean delay of inhibitory connections (in ms) - 'delay_inh_mean': 0.75, + "delay_inh_mean": 0.75, # relative standard deviation of the delay of excitatory and # inhibitory connections - 'delay_rel_std': 0.5, - + "delay_rel_std": 0.5, # turn Poisson input on or off (True or False) # if False: DC input is applied for compensation - 'poisson_input': True, + "poisson_input": True, # indegree of external connections to the different populations (same order # as in 'populations') - 'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), + "K_ext": np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), # rate of the Poisson generator (in spikes/s) - 'bg_rate': 8., + "bg_rate": 8.0, # delay from the Poisson generator to the network (in ms) - 'delay_poisson': 1.5, - + "delay_poisson": 1.5, # initial conditions for the membrane potential, options are: # 'original': uniform mean and standard deviation for all populations as # used in earlier implementations of the model # 'optimized': population-specific mean and standard deviation, allowing a # reduction of the initial activity burst in the network # (default) - 'V0_type': 'optimized', + "V0_type": "optimized", # parameters of the neuron model - 'neuron_params': { + "neuron_params": { # membrane potential average for the neurons (in mV) - 'V0_mean': {'original': -58.0, - 'optimized': [-68.28, -63.16, -63.33, -63.45, - -63.11, -61.66, -66.72, -61.43]}, + "V0_mean": { + "original": -58.0, + "optimized": [ + -68.28, + -63.16, + -63.33, + -63.45, + -63.11, + -61.66, + -66.72, + -61.43, + ], + }, # standard deviation of the average membrane potential (in mV) - 'V0_std': {'original': 10.0, - 'optimized': [5.36, 4.57, 4.74, 4.94, - 4.94, 4.55, 5.46, 4.48]}, + "V0_std": { + "original": 10.0, + "optimized": [5.36, 4.57, 4.74, 4.94, 4.94, 4.55, 5.46, 
4.48], + }, # reset membrane potential of the neurons (in mV) - 'E_L': -65.0, + "E_L": -65.0, # threshold potential of the neurons (in mV) - 'V_th': -50.0, + "V_th": -50.0, # membrane potential after a spike (in mV) - 'V_reset': -65.0, + "V_reset": -65.0, # membrane capacitance (in pF) - 'C_m': 250.0, + "C_m": 250.0, # membrane time constant (in ms) - 'tau_m': 10.0, + "tau_m": 10.0, # time constant of postsynaptic currents (in ms) - 'tau_syn': 0.5, + "tau_syn": 0.5, # refractory period of the neurons after a spike (in ms) - 't_ref': 2.0}} + "t_ref": 2.0, + }, +} # derive matrix of mean PSPs, # the mean PSP of the connection from L4E to L23E is doubled PSP_matrix_mean = get_exc_inh_matrix( - net_dict['PSP_exc_mean'], - net_dict['PSP_exc_mean'] * net_dict['g'], - len(net_dict['populations'])) -PSP_matrix_mean[0, 2] = 2. * net_dict['PSP_exc_mean'] + net_dict["PSP_exc_mean"], + net_dict["PSP_exc_mean"] * net_dict["g"], + len(net_dict["populations"]), +) +PSP_matrix_mean[0, 2] = 2.0 * net_dict["PSP_exc_mean"] updated_dict = { # matrix of mean PSPs - 'PSP_matrix_mean': PSP_matrix_mean, - + "PSP_matrix_mean": PSP_matrix_mean, # matrix of mean delays - 'delay_matrix_mean': get_exc_inh_matrix( - net_dict['delay_exc_mean'], - net_dict['delay_inh_mean'], - len(net_dict['populations']))} + "delay_matrix_mean": get_exc_inh_matrix( + net_dict["delay_exc_mean"], + net_dict["delay_inh_mean"], + len(net_dict["populations"]), + ), +} net_dict.update(updated_dict) diff --git a/python/Potjans_2014_s/run_microcircuit.py b/python/Potjans_2014_s/run_microcircuit.py index 9d211f962..d0ab39273 100644 --- a/python/Potjans_2014_s/run_microcircuit.py +++ b/python/Potjans_2014_s/run_microcircuit.py @@ -32,13 +32,16 @@ ############################################################################### # Import the necessary modules and start the time measurements. -from stimulus_params import stim_dict -from network_params import net_dict -from sim_params import sim_dict +import time + import network -#import nestgpu as ngpu + +# import nestgpu as ngpu import numpy as np -import time +from network_params import net_dict +from sim_params import sim_dict +from stimulus_params import stim_dict + time_start = time.time() ############################################################################### @@ -60,10 +63,10 @@ net.connect() time_connect = time.time() -net.simulate(sim_dict['t_presim']) +net.simulate(sim_dict["t_presim"]) time_presimulate = time.time() -net.simulate(sim_dict['t_sim']) +net.simulate(sim_dict["t_sim"]) time_simulate = time.time() ############################################################################### @@ -74,10 +77,8 @@ # The computation of spike rates discards the presimulation time to exclude # initialization artifacts. -raster_plot_interval = np.array([stim_dict['th_start'] - 100.0, - stim_dict['th_start'] + 100.0]) -firing_rates_interval = np.array([sim_dict['t_presim'], - sim_dict['t_presim'] + sim_dict['t_sim']]) +raster_plot_interval = np.array([stim_dict["th_start"] - 100.0, stim_dict["th_start"] + 100.0]) +firing_rates_interval = np.array([sim_dict["t_presim"], sim_dict["t_presim"] + sim_dict["t_sim"]]) net.evaluate(raster_plot_interval, firing_rates_interval) time_evaluate = time.time() @@ -86,25 +87,12 @@ # data evaluation and print calls. 
print( - '\nTimes:\n' + # of Rank {}:\n'.format( .Rank()) + - ' Total time: {:.3f} s\n'.format( - time_evaluate - - time_start) + - ' Time to initialize: {:.3f} s\n'.format( - time_network - - time_start) + - ' Time to create: {:.3f} s\n'.format( - time_create - - time_network) + - ' Time to connect: {:.3f} s\n'.format( - time_connect - - time_create) + - ' Time to presimulate: {:.3f} s\n'.format( - time_presimulate - - time_connect) + - ' Time to simulate: {:.3f} s\n'.format( - time_simulate - - time_presimulate) + - ' Time to evaluate: {:.3f} s\n'.format( - time_evaluate - - time_simulate)) + "\nTimes:\n" + + " Total time: {:.3f} s\n".format(time_evaluate - time_start) + + " Time to initialize: {:.3f} s\n".format(time_network - time_start) + + " Time to create: {:.3f} s\n".format(time_create - time_network) + + " Time to connect: {:.3f} s\n".format(time_connect - time_create) + + " Time to presimulate: {:.3f} s\n".format(time_presimulate - time_connect) + + " Time to simulate: {:.3f} s\n".format(time_simulate - time_presimulate) + + " Time to evaluate: {:.3f} s\n".format(time_evaluate - time_simulate) +) diff --git a/python/Potjans_2014_s/sim_params.py b/python/Potjans_2014_s/sim_params.py index b7fbff797..04215e7f2 100644 --- a/python/Potjans_2014_s/sim_params.py +++ b/python/Potjans_2014_s/sim_params.py @@ -34,26 +34,27 @@ # The full simulation time is the sum of a presimulation time and the main # simulation time. # presimulation time (in ms) - 't_presim': 1000.0, + "t_presim": 1000.0, # simulation time (in ms) - 't_sim': 10000.0, + "t_sim": 10000.0, # resolution of the simulation (in ms) - 'sim_resolution': 0.1, + "sim_resolution": 0.1, # list of recording devices, default is 'spike_detector'. A 'voltmeter' can # be added to record membrane voltages of the neurons. Nothing will be # recorded if an empty list is given.
- 'rec_dev': [], + "rec_dev": [], # path to save the output data - 'data_path': os.path.join(os.getcwd(), 'data/'), + "data_path": os.path.join(os.getcwd(), "data/"), # masterseed for NESTGPU and NumPy - 'master_seed': 12349, #55, + "master_seed": 12349, # 55, # number of threads per MPI process - 'local_num_threads': 1, + "local_num_threads": 1, # recording interval of the membrane potential (in ms) - 'rec_V_int': 1.0, + "rec_V_int": 1.0, # if True, data will be overwritten, # if False, a NESTError is raised if the files already exist - 'overwrite_files': True, + "overwrite_files": True, # print the time progress. This should only be used when the simulation # is run on a local machine. - 'print_time': False} + "print_time": False, +} diff --git a/python/Potjans_2014_s/stimulus_params.py b/python/Potjans_2014_s/stimulus_params.py index 42d397dfd..c79f53768 100644 --- a/python/Potjans_2014_s/stimulus_params.py +++ b/python/Potjans_2014_s/stimulus_params.py @@ -34,34 +34,33 @@ stim_dict = { # optional thalamic input # turn thalamic input on or off (True or False) - 'thalamic_input': False, + "thalamic_input": False, # start of the thalamic input (in ms) - 'th_start': 700.0, + "th_start": 700.0, # duration of the thalamic input (in ms) - 'th_duration': 10.0, + "th_duration": 10.0, # rate of the thalamic input (in spikes/s) - 'th_rate': 120.0, + "th_rate": 120.0, # number of thalamic neurons - 'num_th_neurons': 902, + "num_th_neurons": 902, # connection probabilities of the thalamus to the different populations # (same order as in 'populations' in 'net_dict') - 'conn_probs_th': - np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), + "conn_probs_th": np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]), # mean amplitude of the thalamic postsynaptic potential (in mV), # standard deviation will be taken from 'net_dict' - 'PSP_th': 0.15, + "PSP_th": 0.15, # mean delay of the thalamic input (in ms) - 'delay_th_mean': 1.5, + "delay_th_mean": 1.5, # relative standard deviation of the thalamic delay (in ms) - 'delay_th_rel_std': 0.5, - + "delay_th_rel_std": 0.5, # optional DC input # turn DC input on or off (True or False) - 'dc_input': False, + "dc_input": False, # start of the DC input (in ms) - 'dc_start': 650.0, + "dc_start": 650.0, # duration of the DC input (in ms) - 'dc_dur': 100.0, + "dc_dur": 100.0, # amplitude of the DC input (in pA); final amplitude is population-specific # and will be obtained by multiplication with 'K_ext' - 'dc_amp': 0.3} + "dc_amp": 0.3, +} diff --git a/python/examples/balanced_izh.py b/python/examples/balanced_izh.py index 66046a8c7..217e77b16 100644 --- a/python/examples/balanced_izh.py +++ b/python/examples/balanced_izh.py @@ -1,33 +1,34 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 ngpu.SetKernelStatus("time_resolution", 1.0) print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 80 # number of excitatory synapses 
per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 80 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron -#fact=0.002 -fact=0.9 -Wex = 0.5*fact -Win = -3.5*fact +# fact=0.002 +fact = 0.9 +Wex = 0.5 * fact +Win = -3.5 * fact tau_plus = 20.0 tau_minus = 20.0 @@ -38,20 +39,36 @@ mu_minus = 1.0 Wmax = 0.001 -syn_group_ex = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_ex, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) -syn_group_in = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_in, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group_ex = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_ex, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) +syn_group_in = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_in, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz -poiss_weight = 0.37*fact -poiss_delay = 1.0 # poisson signal delay in ms +poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_weight = 0.37 * fact +poiss_delay = 1.0 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -59,9 +76,9 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("izhikevich", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters delay = 2.0 @@ -69,49 +86,56 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": delay, "receptor":0, - "synapse_group":syn_group_ex} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": delay, + "receptor": 0, + "synapse_group": syn_group_ex, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":delay, "receptor":1, - "synapse_group":syn_group_in} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": delay, + "receptor": 1, + "synapse_group": syn_group_in, +} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], 
neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/balanced_izh_cond_beta.py b/python/examples/balanced_izh_cond_beta.py index 22fca9762..855ebd73f 100644 --- a/python/examples/balanced_izh_cond_beta.py +++ b/python/examples/balanced_izh_cond_beta.py @@ -1,33 +1,34 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 ngpu.SetKernelStatus("time_resolution", 1.0) print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 80 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 80 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron -#fact=0.002 -fact=0.0055 -Wex = 0.5*fact -Win = 3.5*fact +# fact=0.002 +fact = 0.0055 +Wex = 0.5 * fact +Win = 3.5 * fact tau_plus = 20.0 tau_minus = 20.0 @@ -38,20 +39,36 @@ mu_minus = 1.0 Wmax = 0.001 -syn_group_ex = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_ex, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) -syn_group_in = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_in, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group_ex = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_ex, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) +syn_group_in = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_in, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz -poiss_weight = 0.37*fact -poiss_delay = 1.0 # poisson signal delay in ms +poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_weight = 0.37 * fact +poiss_delay = 1.0 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -59,66 +76,72 @@ # Create n_neurons neurons with n_receptor receptor ports 
neuron = ngpu.Create("izhikevich_cond_beta", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) -ngpu.SetStatus(neuron, {"h_min_rel":0.1, "h0_rel":0.1}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) +ngpu.SetStatus(neuron, {"h_min_rel": 0.1, "h0_rel": 0.1}) delay = 2.0 # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": delay, "receptor":0, - "synapse_group":syn_group_ex} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": delay, + "receptor": 0, + "synapse_group": syn_group_ex, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":delay, "receptor":1, - "synapse_group":syn_group_in} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": delay, + "receptor": 1, + "synapse_group": syn_group_in, +} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/balanced_izh_psc_exp_2s.py b/python/examples/balanced_izh_psc_exp_2s.py index ebb91f288..a14a0f10d 100644 --- a/python/examples/balanced_izh_psc_exp_2s.py +++ b/python/examples/balanced_izh_psc_exp_2s.py @@ -1,34 +1,35 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) 
!= 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 ngpu.SetKernelStatus("time_resolution", 1.0) print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers -sim_time = 10.0 # simulation time in seconds +sim_time = 10.0 # simulation time in seconds n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 80 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 80 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron -fact=0.42 -Wex = 0.5*fact -Win = -3.5*fact +fact = 0.42 +Wex = 0.5 * fact +Win = -3.5 * fact tau_plus = 20.0 tau_minus = 20.0 @@ -38,15 +39,23 @@ mu_minus = 1.0 Wmax = 10.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz -poiss_weight = 0.37*fact -poiss_delay = 1.0 # poisson signal delay in ms +poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_weight = 0.37 * fact +poiss_delay = 1.0 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -55,9 +64,9 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("izhikevich_psc_exp_2s", n_neurons, n_receptors) ngpu.ActivateSpikeCount(neuron) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters delay = 2.0 @@ -65,48 +74,51 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": delay, "receptor":0, - "synapse_group":syn_group} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": delay, + "receptor": 0, + "synapse_group": syn_group, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":delay, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = 
{"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) -ngpu.Simulate(sim_time*1000.0) +ngpu.Simulate(sim_time * 1000.0) spike_count = ngpu.GetStatus(neuron, "spike_count") -spike_count = [s[0] for s in spike_count] -print("Average firing rate: ", sum(spike_count)/len(spike_count)/sim_time) +spike_count = [s[0] for s in spike_count] +print("Average firing rate: ", sum(spike_count) / len(spike_count) / sim_time) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/balanced_izh_psc_exp_2s_eval_time.py b/python/examples/balanced_izh_psc_exp_2s_eval_time.py index c4bfc79af..5dd58b8ab 100644 --- a/python/examples/balanced_izh_psc_exp_2s_eval_time.py +++ b/python/examples/balanced_izh_psc_exp_2s_eval_time.py @@ -1,34 +1,35 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 ngpu.SetKernelStatus("time_resolution", 1.0) print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers -sim_time = 10.0 # simulation time in seconds +sim_time = 10.0 # simulation time in seconds n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 80 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 80 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron -fact=0.42 -Wex = 0.5*fact -Win = -3.5*fact +fact = 0.42 +Wex = 0.5 * fact +Win = -3.5 * fact tau_plus = 20.0 tau_minus = 20.0 @@ -38,15 +39,23 @@ mu_minus = 1.0 Wmax = 10.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz -poiss_weight = 0.37*fact -poiss_delay = 1.0 # poisson signal delay in ms +poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_weight = 0.37 * fact +poiss_delay = 1.0 # poisson signal delay in ms # create poisson 
generator pg = ngpu.Create("poisson_generator") @@ -54,9 +63,9 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("izhikevich_psc_exp_2s", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters delay = 2.0 @@ -64,25 +73,29 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": delay, "receptor":0, - "synapse_group":syn_group} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": delay, + "receptor": 0, + "synapse_group": syn_group, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":delay, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) -ngpu.Simulate(sim_time*1000.0) +ngpu.Simulate(sim_time * 1000.0) diff --git a/python/examples/balanced_izh_psc_exp_5s.py b/python/examples/balanced_izh_psc_exp_5s.py index a1a29c312..8279b372c 100644 --- a/python/examples/balanced_izh_psc_exp_5s.py +++ b/python/examples/balanced_izh_psc_exp_5s.py @@ -1,33 +1,34 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 ngpu.SetKernelStatus("time_resolution", 1.0) print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 80 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 80 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron -#fact=0.002 -fact=0.44 -Wex = 0.5*fact -Win = -3.5*fact +# fact=0.002 +fact = 0.44 +Wex = 0.5 * fact +Win = -3.5 * fact tau_plus = 20.0 tau_minus = 20.0 @@ -38,20 +39,36 @@ mu_minus = 1.0 Wmax = 10.0 -syn_group_ex = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_ex, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) -syn_group_in = 
ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd_in, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group_ex = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_ex, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) +syn_group_in = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd_in, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz -poiss_weight = 0.37*fact -poiss_delay = 1.0 # poisson signal delay in ms +poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_weight = 0.37 * fact +poiss_delay = 1.0 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -59,9 +76,9 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("izhikevich_psc_exp_5s", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters delay = 2.0 @@ -69,49 +86,56 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": delay, "receptor":0, - "synapse_group":syn_group_ex} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": delay, + "receptor": 0, + "synapse_group": syn_group_ex, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":delay, "receptor":1, - "synapse_group":syn_group_in} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": delay, + "receptor": 1, + "synapse_group": syn_group_in, +} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for 
row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/brunel_array.py b/python/examples/brunel_array.py index 156a10fc1..2dc247b9f 100644 --- a/python/examples/brunel_array.py +++ b/python/examples/brunel_array.py @@ -1,34 +1,35 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -36,72 +37,64 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 std_delay = 0.25 min_delay = 0.1 # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_delays = ngpu.RandomNormalClipped(CE*n_neurons, mean_delay, - std_delay, min_delay, - mean_delay+3*std_delay) +exc_delays = ngpu.RandomNormalClipped(CE * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay) -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"array":exc_delays}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = {"weight": Wex, "delay": {"array": exc_delays}, "receptor": 0} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_delays = ngpu.RandomNormalClipped(CI*n_neurons, mean_delay, - std_delay, min_delay, - mean_delay+3*std_delay) +inh_delays = ngpu.RandomNormalClipped(CI * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * 
std_delay) -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"array": inh_delays}, - "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = {"weight": Win, "delay": {"array": inh_delays}, "receptor": 1} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/brunel_list.py b/python/examples/brunel_list.py index 6b005133d..c01eb2b00 100644 --- a/python/examples/brunel_list.py +++ b/python/examples/brunel_list.py @@ -1,34 +1,35 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -37,8 +38,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = 
neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = exc_neuron.ToList() @@ -47,8 +48,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -57,50 +57,62 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) filename = "test_brunel_list.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/brunel_mpi.py b/python/examples/brunel_mpi.py index 2c92090ed..7e08a18e3 100644 --- a/python/examples/brunel_mpi.py +++ 
b/python/examples/brunel_mpi.py @@ -1,41 +1,41 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu -ngpu.ConnectMpiInit(); +ngpu.ConnectMpiInit() mpi_np = ngpu.MpiNp() if (mpi_np != 2) | (len(sys.argv) != 2): - print ("Usage: mpirun -np 2 python %s n_neurons" % sys.argv[0]) + print("Usage: mpirun -np 2 python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 mpi_id = ngpu.MpiId() print("Building on host ", mpi_id, " ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -delay = 1.0 # synaptic delay in ms +delay = 1.0 # synaptic delay in ms -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -43,23 +43,21 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) # Excitatory local connections, defined on all hosts # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE*3/4 -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE*3//4} -exc_syn_dict={"weight": Wex, "delay": delay, - "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE * 3 // 4} +exc_syn_dict = {"weight": Wex, "delay": delay, "receptor": 0} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) @@ -67,28 +65,25 @@ # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed indegree CI*3/4 -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI*3//4} -inh_syn_dict={"weight": Win, "delay": delay, - "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI * 3 // 4} +inh_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_mpi" + str(mpi_id) + ".dat" -i_neuron_arr = [neuron[0], 
neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[0], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ###################################################################### ## WRITE HERE REMOTE CONNECTIONS @@ -98,9 +93,8 @@ # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE//4 # host 0 to host 1 -re_conn_dict={"rule": "fixed_indegree", "indegree": CE//4} -re_syn_dict={"weight": Wex, "delay": delay, - "receptor":0} +re_conn_dict = {"rule": "fixed_indegree", "indegree": CE // 4} +re_syn_dict = {"weight": Wex, "delay": delay, "receptor": 0} # host 0 to host 1 ngpu.RemoteConnect(0, exc_neuron, 1, neuron, re_conn_dict, re_syn_dict) # host 1 to host 0 @@ -110,9 +104,8 @@ # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed indegree CI//4 # host 0 to host 1 -ri_conn_dict={"rule": "fixed_indegree", "indegree": CI//4} -ri_syn_dict={"weight": Win, "delay": delay, - "receptor":1} +ri_conn_dict = {"rule": "fixed_indegree", "indegree": CI // 4} +ri_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} # host 0 to host 1 ngpu.RemoteConnect(0, inh_neuron, 1, neuron, ri_conn_dict, ri_syn_dict) # host 1 to host 0 @@ -120,15 +113,15 @@ ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/brunel_net.py b/python/examples/brunel_net.py index 70e524d91..9202e9180 100644 --- a/python/examples/brunel_net.py +++ b/python/examples/brunel_net.py @@ -1,59 +1,65 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = 
ngpu.Create("poisson_generator") ngpu.SetStatus(pg, "rate", poiss_rate) # Create n_neurons neurons with n_receptor receptor ports -#neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) +# neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) neuron = ngpu.Create("aeif_cond_beta", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -#ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, +# ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, # "tau_rise":tau_rise}) -ngpu.SetStatus(neuron, {"E_rev_ex": E_rev[0], - "E_rev_in": E_rev[1], - "tau_decay_ex": tau_decay[0], - "tau_decay_in": tau_decay[1], - "tau_rise_ex": tau_rise[0], - "tau_rise_in": tau_rise[1]}) +ngpu.SetStatus( + neuron, + { + "E_rev_ex": E_rev[0], + "E_rev_in": E_rev[1], + "tau_decay_ex": tau_decay[0], + "tau_decay_in": tau_decay[1], + "tau_rise_ex": tau_rise[0], + "tau_rise_in": tau_rise[1], + }, +) mean_delay = 0.5 std_delay = 0.25 @@ -61,52 +67,65 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_net.dat" -i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [neuron[37], neuron[randrange(n_neurons)], neuron[n_neurons - 1]] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, 
i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/brunel_outdegree.py b/python/examples/brunel_outdegree.py index bd0c9fc02..fbc97f599 100644 --- a/python/examples/brunel_outdegree.py +++ b/python/examples/brunel_outdegree.py @@ -1,33 +1,34 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CPN = 1000 # number of output connections per neuron +CPN = 1000 # number of output connections per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -35,15 +36,14 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 std_delay = 0.25 @@ -51,66 +51,84 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CPN connections per neuron -exc_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CPN connections per neuron 
-inh_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) filename = "test_brunel_outdegree.dat" # any set of neuron indexes -i_neuron_arr = [neuron[0], neuron[randrange(n_neurons)], - neuron[randrange(n_neurons)], neuron[randrange(n_neurons)], - neuron[randrange(n_neurons)], neuron[randrange(n_neurons)], - neuron[randrange(n_neurons)], neuron[randrange(n_neurons)], - neuron[randrange(n_neurons)], neuron[n_neurons-1]] +i_neuron_arr = [ + neuron[0], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[randrange(n_neurons)], + neuron[n_neurons - 1], +] i_receptor_arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # create multimeter record of V_m -var_name_arr = ["V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", - "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +var_name_arr = ["V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m"] +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] -V4=[row[4] for row in data_list] -V5=[row[5] for row in data_list] -V6=[row[6] for row in data_list] -V7=[row[7] for row in data_list] -V8=[row[8] for row in data_list] -V9=[row[9] for row in data_list] -V10=[row[10] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] +V4 = [row[4] for row in data_list] +V5 = [row[5] for row in data_list] +V6 = [row[6] for row in data_list] +V7 = [row[7] for row in data_list] +V8 = [row[8] for row in data_list] +V9 = [row[9] for row in data_list] +V10 = [row[10] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/example1.py b/python/examples/example1.py index f609b2bac..be9b2dd6b 100644 --- a/python/examples/example1.py +++ b/python/examples/example1.py @@ -2,15 +2,15 @@ neuron = ngpu.Create("aeif_cond_beta") -ngpu.SetStatus(neuron, {"I_e":1000.0}) +ngpu.SetStatus(neuron, {"I_e": 1000.0}) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) ngpu.Simulate() data_list = ngpu.GetRecordData(record) 
-t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/example2.py b/python/examples/example2.py index e519c2b7a..2d070a924 100644 --- a/python/examples/example2.py +++ b/python/examples/example2.py @@ -1,12 +1,12 @@ import nestgpu as ngpu neuron = ngpu.Create("aeif_cond_beta") -poiss_gen = ngpu.Create("poisson_generator"); +poiss_gen = ngpu.Create("poisson_generator") ngpu.SetStatus(poiss_gen, "rate", 12000.0) -conn_dict={"rule": "one_to_one"} -syn_dict={"weight": 0.05, "delay": 2.0, "receptor":0} +conn_dict = {"rule": "one_to_one"} +syn_dict = {"weight": 0.05, "delay": 2.0, "receptor": 0} ngpu.Connect(poiss_gen, neuron, conn_dict, syn_dict) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) @@ -14,8 +14,8 @@ ngpu.Simulate() data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/examples/plot_aeif_cond_alpha.py b/python/examples/plot_aeif_cond_alpha.py index c70022226..4922a8ac4 100644 --- a/python/examples/plot_aeif_cond_alpha.py +++ b/python/examples/plot_aeif_cond_alpha.py @@ -1,11 +1,24 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0001 -neuron = ngpu.Create('aeif_cond_alpha', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, 'E_rev_ex':20.0, 'E_rev_in': -85.0, - 'tau_syn_ex':40.0, 'tau_syn_in': 20.0}) +neuron = ngpu.Create("aeif_cond_alpha", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev_ex": 20.0, + "E_rev_in": -85.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] @@ -16,9 +29,9 @@ delay = [1.0, 100.0] weight = [0.1, 0.2] -conn_spec={"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +conn_spec = {"rule": "all_to_all"} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) @@ -27,22 +40,22 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] - -data = np.loadtxt('../test/test_aeif_cond_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) - -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#print(dV) -rmse =np.std(dV)/abs(np.mean(V_m)) +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] + +data = np.loadtxt("../test/test_aeif_cond_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) + +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +# print(dV) +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt diff --git a/python/examples/plot_aeif_cond_alpha_multisynapse.py b/python/examples/plot_aeif_cond_alpha_multisynapse.py index 656827444..764dedbba 100644 --- 
a/python/examples/plot_aeif_cond_alpha_multisynapse.py +++ b/python/examples/plot_aeif_cond_alpha_multisynapse.py @@ -1,11 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_cond_alpha_multisynapse', 1, 3) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, 'E_rev':[20.0, 0.0, -85.0], \ - 'tau_syn':[40.0, 20.0, 30.0]}) +neuron = ngpu.Create("aeif_cond_alpha_multisynapse", 1, 3) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev": [20.0, 0.0, -85.0], + "tau_syn": [40.0, 20.0, 30.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -15,40 +26,40 @@ delay = [1.0, 100.0, 130.0] weight = [0.1, 0.2, 0.5] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_cond_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_cond_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_cond_alpha_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_cond_alpha_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_cond_beta.py b/python/examples/plot_aeif_cond_beta.py index 49d72249f..0737bcb14 100644 --- a/python/examples/plot_aeif_cond_beta.py +++ b/python/examples/plot_aeif_cond_beta.py @@ -1,14 +1,26 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.00005 -neuron = ngpu.Create('aeif_cond_beta', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, "g_L":300.0, - 'E_rev_ex': 20.0, 'E_rev_in': -85.0, - 'tau_decay_ex': 40.0, - 'tau_decay_in': 20.0, - 'tau_rise_ex': 20.0, - 'tau_rise_in': 5.0}) +neuron = ngpu.Create("aeif_cond_beta", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev_ex": 20.0, + "E_rev_in": -85.0, + "tau_decay_ex": 40.0, + "tau_decay_in": 20.0, + "tau_rise_ex": 20.0, + "tau_rise_in": 5.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -18,9 
+30,9 @@ delay = [1.0, 100.0] weight = [0.1, 0.2] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(2): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) @@ -28,22 +40,22 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] -data = np.loadtxt('../test/test_aeif_cond_beta_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_cond_beta_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt diff --git a/python/examples/plot_aeif_cond_beta_multisynapse.py b/python/examples/plot_aeif_cond_beta_multisynapse.py index a3483b48f..1cff23d85 100644 --- a/python/examples/plot_aeif_cond_beta_multisynapse.py +++ b/python/examples/plot_aeif_cond_beta_multisynapse.py @@ -1,12 +1,23 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_cond_beta_multisynapse', 1, 3) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, 'E_rev':[20.0, 0.0, -85.0], \ - 'tau_decay':[40.0, 20.0, 30.0], \ - 'tau_rise':[20.0, 10.0, 5.0]}) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 1, 3) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev": [20.0, 0.0, -85.0], + "tau_decay": [40.0, 20.0, 30.0], + "tau_rise": [20.0, 10.0, 5.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -16,40 +27,40 @@ delay = [1.0, 100.0, 130.0] weight = [0.1, 0.2, 0.5] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_cond_beta_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_cond_beta_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_cond_beta_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print 
(len(t1)) +data = np.loadtxt("../test/test_aeif_cond_beta_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_psc_alpha.py b/python/examples/plot_aeif_psc_alpha.py index 0f0461bc3..4bfc49060 100644 --- a/python/examples/plot_aeif_psc_alpha.py +++ b/python/examples/plot_aeif_psc_alpha.py @@ -1,12 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 3e-6 -neuron = ngpu.Create('aeif_psc_alpha', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, - "tau_syn_ex": 40.0, - "tau_syn_in": 20.0}) +neuron = ngpu.Create("aeif_psc_alpha", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -16,43 +26,43 @@ delay = [1.0, 100.0] weight = [1.0, 2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_psc_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_psc_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_psc_alpha_multisynapse.py b/python/examples/plot_aeif_psc_alpha_multisynapse.py index 005bb7dbf..3449fd232 100644 --- a/python/examples/plot_aeif_psc_alpha_multisynapse.py +++ 
b/python/examples/plot_aeif_psc_alpha_multisynapse.py @@ -1,10 +1,21 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_alpha_multisynapse', 1, 2) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "tau_syn":[40.0, 20.0]}) +neuron = ngpu.Create("aeif_psc_alpha_multisynapse", 1, 2) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn": [40.0, 20.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -14,43 +25,43 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_psc_alpha_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_psc_alpha_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_psc_delta.py b/python/examples/plot_aeif_psc_delta.py index 6be58afff..4b55961ab 100644 --- a/python/examples/plot_aeif_psc_delta.py +++ b/python/examples/plot_aeif_psc_delta.py @@ -1,10 +1,14 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_delta') -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "C_m":20000.0}) +neuron = ngpu.Create("aeif_psc_delta") +ngpu.SetStatus( + neuron, + {"V_peak": 0.0, "a": 4.0, "b": 80.5, "E_L": -70.6, "g_L": 300.0, "C_m": 20000.0}, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -15,43 +19,43 @@ # the aeif_psc_delta model has one port, negative inputs require negative weights weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} 
-syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_delta_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_delta_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_psc_delta_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_psc_delta_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_psc_exp.py b/python/examples/plot_aeif_psc_exp.py index 50be115e3..df2c6f24e 100644 --- a/python/examples/plot_aeif_psc_exp.py +++ b/python/examples/plot_aeif_psc_exp.py @@ -1,13 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_exp', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, - "E_L":-70.6, - "g_L":300.0, - "tau_syn_ex": 40.0, - "tau_syn_in": 20.0}) +neuron = ngpu.Create("aeif_psc_exp", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -17,43 +26,43 @@ delay = [1.0, 100.0] weight = [1.0, 2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row 
in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_aeif_psc_exp_multisynapse.py b/python/examples/plot_aeif_psc_exp_multisynapse.py index b7c09f47d..9220ddc22 100644 --- a/python/examples/plot_aeif_psc_exp_multisynapse.py +++ b/python/examples/plot_aeif_psc_exp_multisynapse.py @@ -1,10 +1,21 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_exp_multisynapse', 1, 2) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "tau_syn":[40.0, 20.0]}) +neuron = ngpu.Create("aeif_psc_exp_multisynapse", 1, 2) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn": [40.0, 20.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -14,43 +25,43 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_psc_exp_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_psc_exp_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) 
print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_fast_iaf_psc_exp.py b/python/examples/plot_fast_iaf_psc_exp.py index 362b19488..701ebea09 100644 --- a/python/examples/plot_fast_iaf_psc_exp.py +++ b/python/examples/plot_fast_iaf_psc_exp.py @@ -1,12 +1,14 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 E_L = -65.0 ngpu.SetKernelStatus("verbosity_level", 0) -neuron = ngpu.Create('iaf_psc_exp_g', 1) +neuron = ngpu.Create("iaf_psc_exp_g", 1) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] @@ -17,43 +19,43 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1]+E_L for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] + E_L for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_fast_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_fast_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -#dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#rmse =np.std(dV)/abs(np.mean(V_m)) -#print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] +# rmse =np.std(dV)/abs(np.mean(V_m)) +# print("rmse : ", rmse, " tolerance: ", tolerance) +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_iaf_psc_alpha.py b/python/examples/plot_iaf_psc_alpha.py index b0a01d48d..f2d37c26f 100644 --- a/python/examples/plot_iaf_psc_alpha.py +++ b/python/examples/plot_iaf_psc_alpha.py @@ -1,4 +1,5 @@ import sys + import nestgpu as ngpu import numpy as np @@ -12,7 +13,7 @@ """ -neuron = ngpu.Create('iaf_psc_alpha') +neuron = ngpu.Create("iaf_psc_alpha") spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -25,10 +26,10 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, 
"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) @@ -37,14 +38,14 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[-70.0+row[1] for row in data_list] - -data = np.loadtxt('../test/test_iaf_psc_alpha_nest.txt', delimiter="\t") -t1=[x[0]+0.1 for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +t = [row[0] for row in data_list] +V_m = [-70.0 + row[1] for row in data_list] + +data = np.loadtxt("../test/test_iaf_psc_alpha_nest.txt", delimiter="\t") +t1 = [x[0] + 0.1 for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) import matplotlib.pyplot as plt diff --git a/python/examples/plot_iaf_psc_exp.py b/python/examples/plot_iaf_psc_exp.py index 70beaed7b..88ddee8f8 100644 --- a/python/examples/plot_iaf_psc_exp.py +++ b/python/examples/plot_iaf_psc_exp.py @@ -1,8 +1,10 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('iaf_psc_exp', 1) +neuron = ngpu.Create("iaf_psc_exp", 1) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -12,43 +14,43 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[-70.0+row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [-70.0 + row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -#dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#rmse =np.std(dV)/abs(np.mean(V_m)) -#print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] +# rmse =np.std(dV)/abs(np.mean(V_m)) +# print("rmse : ", rmse, " tolerance: ", tolerance) +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_iaf_psc_exp_g.py b/python/examples/plot_iaf_psc_exp_g.py index b85efe1bb..239e58694 100644 --- a/python/examples/plot_iaf_psc_exp_g.py +++ b/python/examples/plot_iaf_psc_exp_g.py @@ -1,8 +1,10 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 
0.0005 -neuron = ngpu.Create('iaf_psc_exp_g', 1) +neuron = ngpu.Create("iaf_psc_exp_g", 1) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -12,43 +14,43 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[-65.0+row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [-65.0 + row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_fast_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_fast_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -#dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#rmse =np.std(dV)/abs(np.mean(V_m)) -#print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] +# rmse =np.std(dV)/abs(np.mean(V_m)) +# print("rmse : ", rmse, " tolerance: ", tolerance) +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_izh.py b/python/examples/plot_izh.py index dc5c490cb..ccaeb8846 100644 --- a/python/examples/plot_izh.py +++ b/python/examples/plot_izh.py @@ -1,9 +1,11 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('izhikevich', 1) -#ngpu.SetStatus(neuron, {"tau_syn": 1.0e-6}) +neuron = ngpu.Create("izhikevich", 1) +# ngpu.SetStatus(neuron, {"tau_syn": 1.0e-6}) spike = ngpu.Create("spike_generator") spike_times = [10.0, 40.0] n_spikes = 2 @@ -13,43 +15,43 @@ delay = [1.0, 10.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(80.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = 
dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_izh_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_izh_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -#dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#rmse =np.std(dV)/abs(np.mean(V_m)) -#print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] +# rmse =np.std(dV)/abs(np.mean(V_m)) +# print("rmse : ", rmse, " tolerance: ", tolerance) +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/plot_izh_cond_beta.py b/python/examples/plot_izh_cond_beta.py index f2561285e..815aa985b 100644 --- a/python/examples/plot_izh_cond_beta.py +++ b/python/examples/plot_izh_cond_beta.py @@ -1,12 +1,19 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('izhikevich_cond_beta', 1, 3) -#ngpu.SetStatus(neuron, {'u':-0.2*70.0, 'V_m':-70.0}) -ngpu.SetStatus(neuron, {'E_rev':[20.0, 0.0, -85.0], \ - 'tau_decay':[40.0, 20.0, 30.0], \ - 'tau_rise':[20.0, 10.0, 5.0]}) +neuron = ngpu.Create("izhikevich_cond_beta", 1, 3) +# ngpu.SetStatus(neuron, {'u':-0.2*70.0, 'V_m':-70.0}) +ngpu.SetStatus( + neuron, + { + "E_rev": [20.0, 0.0, -85.0], + "tau_decay": [40.0, 20.0, 30.0], + "tau_rise": [20.0, 10.0, 5.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -14,43 +21,43 @@ # set spike times and heights ngpu.SetStatus(spike, {"spike_times": spike_times}) delay = [1.0, 100.0, 130.0] -fact=0.00225 -weight = [0.1*fact, 0.2*fact, 0.5*fact] +fact = 0.00225 +weight = [0.1 * fact, 0.2 * fact, 0.5 * fact] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_user_m1_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_user_m1_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_aeif_cond_beta_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("../test/test_aeif_cond_beta_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] 
+print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -#if rmse>tolerance: +# if rmse>tolerance: # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt fig1 = plt.figure(1) diff --git a/python/examples/stdp.py b/python/examples/stdp.py index 36ae568e8..28da843eb 100644 --- a/python/examples/stdp.py +++ b/python/examples/stdp.py @@ -1,19 +1,20 @@ -import sys import math +import sys + import nestgpu as ngpu -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: + +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -31,21 +32,29 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ Wmax = 0.001 den_delay = 0.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) sg = ngpu.Create("spike_generator") neuron0 = ngpu.Create("aeif_cond_beta") neuron1 = ngpu.Create("aeif_cond_beta", N) -ngpu.SetStatus(neuron1, {"t_ref": 1000.0, "den_delay":den_delay}) +ngpu.SetStatus(neuron1, {"t_ref": 1000.0, "den_delay": den_delay}) time_diff = 400.0 dt_list = [] delay_stdp_list = [] for i in range(N): - dt_list.append(dt_step*(-0.5*(N-1) + i)) + dt_list.append(dt_step * (-0.5 * (N - 1) + i)) delay_stdp_list.append(time_diff - dt_list[i]) spike_times = [50.0] @@ -56,28 +65,31 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg, {"spike_times": spike_times}) delay0 = 1.0 delay1 = delay0 + time_diff -weight_sg = 17.9 # to make it spike immediately and only once -weight_stdp = Wmax/2 +weight_sg = 17.9 # to make it spike immediately and only once +weight_stdp = Wmax / 2 -conn_dict={"rule": "one_to_one"} -conn_dict_full={"rule": "all_to_all"} -syn_dict0={"weight":weight_sg, "delay":delay0} -syn_dict1={"weight":weight_sg, "delay":delay1} +conn_dict = {"rule": "one_to_one"} +conn_dict_full = {"rule": "all_to_all"} +syn_dict0 = {"weight": weight_sg, "delay": delay0} +syn_dict1 = {"weight": weight_sg, "delay": delay1} ngpu.Connect(sg, neuron0, conn_dict, syn_dict0) ngpu.Connect(sg, neuron1, conn_dict_full, syn_dict1) -syn_dict_stdp={"weight":weight_stdp, "delay_array":delay_stdp_list, \ - "synapse_group":syn_group} +syn_dict_stdp = { + "weight": weight_stdp, + "delay_array": delay_stdp_list, + "synapse_group": syn_group, +} ngpu.Connect(neuron0, neuron1, conn_dict_full, syn_dict_stdp) ngpu.Simulate(1000.0) -#conn_id = ngpu.GetConnections(neuron0, neuron1) +# conn_id = ngpu.GetConnections(neuron0, neuron1) dt = dt_list -#w = 
ngpu.GetStatus(conn_id, "weight") +# w = ngpu.GetStatus(conn_id, "weight") expect_w = [] @@ -86,16 +98,25 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ for i in range(N): conn_id = ngpu.GetConnections(neuron0, neuron1[i]) w = ngpu.GetStatus(conn_id, "weight") - w1 = STDPUpdate(weight_stdp, dt[i], tau_plus, tau_minus, lambd*Wmax, \ - alpha, mu_plus, mu_minus, Wmax) + w1 = STDPUpdate( + weight_stdp, + dt[i], + tau_plus, + tau_minus, + lambd * Wmax, + alpha, + mu_plus, + mu_minus, + Wmax, + ) expect_w.append(w1) sim_w.append(w[0]) - dw.append(w1-w[0]) - if abs(dw[i])>tolerance: + dw.append(w1 - w[0]) + if abs(dw[i]) > tolerance: print("Expected weight: ", w1, " simulated: ", w) - #sys.exit(1) + # sys.exit(1) -#sys.exit(0) +# sys.exit(0) import matplotlib.pyplot as plt diff --git a/python/examples/temp_mpi.py b/python/examples/temp_mpi.py index 6a0065031..87b7dbcc4 100644 --- a/python/examples/temp_mpi.py +++ b/python/examples/temp_mpi.py @@ -1,8 +1,9 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + """ Example of a balanced network executed using MPI. A network of n_neurons is created in every MPI process, @@ -12,38 +13,38 @@ """ -ngpu.ConnectMpiInit(); +ngpu.ConnectMpiInit() mpi_np = ngpu.MpiNp() if (mpi_np < 2) | (len(sys.argv) != 2): - print ("Usage: mpirun -np NP python %s n_neurons" % sys.argv[0]) + print("Usage: mpirun -np NP python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])//5 + +order = int(sys.argv[1]) // 5 mpi_id = ngpu.MpiId() print("Building on host ", mpi_id, " ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -delay = 1.0 # synaptic delay in ms +delay = 1.0 # synaptic delay in ms -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -55,23 +56,21 @@ inh_neuron = [] for i in range(mpi_np): neuron.append(ngpu.RemoteCreate(i, "aeif_cond_beta_multisynapse", n_neurons, n_receptors).node_seq) - exc_neuron.append(neuron[i][0:NE]) # excitatory neurons - inh_neuron.append(neuron[i][NE:n_neurons]) # inhibitory neurons + exc_neuron.append(neuron[i][0:NE]) # excitatory neurons + inh_neuron.append(neuron[i][NE:n_neurons]) # inhibitory neurons # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron[mpi_id], {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron[mpi_id], {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) # Excitatory local connections, defined on all hosts # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE*3/4 -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE*3//4} 
-exc_syn_dict={"weight": Wex, "delay": delay, - "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE * 3 // 4} +exc_syn_dict = {"weight": Wex, "delay": delay, "receptor": 0} ngpu.Connect(exc_neuron[mpi_id], neuron[mpi_id], exc_conn_dict, exc_syn_dict) @@ -79,28 +78,29 @@ # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed indegree CI*3/4 -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI*3//4} -inh_syn_dict={"weight": Win, "delay": delay, - "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI * 3 // 4} +inh_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} ngpu.Connect(inh_neuron[mpi_id], neuron[mpi_id], inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron[mpi_id], pg_conn_dict, pg_syn_dict) filename = "test_brunel_mpi" + str(mpi_id) + ".dat" -i_neuron_arr = [neuron[mpi_id][0], neuron[mpi_id][randrange(n_neurons)], neuron[mpi_id][n_neurons-1]] +i_neuron_arr = [ + neuron[mpi_id][0], + neuron[mpi_id][randrange(n_neurons)], + neuron[mpi_id][n_neurons - 1], +] i_receptor_arr = [0, 0, 0] # any set of neuron indexes # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord(filename, var_name_arr, i_neuron_arr, i_receptor_arr) ###################################################################### ## WRITE HERE REMOTE CONNECTIONS @@ -110,44 +110,42 @@ # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE//4 # host 0 to host 1 -re_conn_dict={"rule": "fixed_indegree", "indegree": (CE//4)//(mpi_np-1)} -re_syn_dict={"weight": Wex, "delay": delay, - "receptor":0} +re_conn_dict = {"rule": "fixed_indegree", "indegree": (CE // 4) // (mpi_np - 1)} +re_syn_dict = {"weight": Wex, "delay": delay, "receptor": 0} # host 0 to host 1 -#ngpu.RemoteConnect(0, exc_neuron[0], 1, neuron[1], re_conn_dict, re_syn_dict) +# ngpu.RemoteConnect(0, exc_neuron[0], 1, neuron[1], re_conn_dict, re_syn_dict) # host 1 to host 0 -#ngpu.RemoteConnect(1, exc_neuron[1], 0, neuron[0], re_conn_dict, re_syn_dict) +# ngpu.RemoteConnect(1, exc_neuron[1], 0, neuron[0], re_conn_dict, re_syn_dict) # Inhibitory remote connections # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed indegree CI//4 # host 0 to host 1 -ri_conn_dict={"rule": "fixed_indegree", "indegree": (CI//4)//(mpi_np-1)} -ri_syn_dict={"weight": Win, "delay": delay, - "receptor":1} +ri_conn_dict = {"rule": "fixed_indegree", "indegree": (CI // 4) // (mpi_np - 1)} +ri_syn_dict = {"weight": Win, "delay": delay, "receptor": 1} # host 0 to host 1 -#ngpu.RemoteConnect(0, inh_neuron[0], 1, neuron[1], ri_conn_dict, ri_syn_dict) +# ngpu.RemoteConnect(0, inh_neuron[0], 1, neuron[1], ri_conn_dict, ri_syn_dict) # host 1 to host 0 -#ngpu.RemoteConnect(1, inh_neuron[1], 0, neuron[0], ri_conn_dict, ri_syn_dict) +# ngpu.RemoteConnect(1, inh_neuron[1], 0, neuron[0], ri_conn_dict, ri_syn_dict) for i in range(mpi_np): for j in range(mpi_np): - if(i!=j): + if i != j: ngpu.RemoteConnect(i, exc_neuron[i], j, neuron[j], re_conn_dict, re_syn_dict) ngpu.RemoteConnect(i, inh_neuron[i], j, neuron[j], ri_conn_dict, 
ri_syn_dict) ngpu.Simulate() -nrows=ngpu.GetRecordDataRows(record) -ncol=ngpu.GetRecordDataColumns(record) -#print nrows, ncol +nrows = ngpu.GetRecordDataRows(record) +ncol = ngpu.GetRecordDataColumns(record) +# print nrows, ncol data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/hpc_benchmark/hpc_benchmark.py b/python/hpc_benchmark/hpc_benchmark.py index b57502ade..4c7417ade 100644 --- a/python/hpc_benchmark/hpc_benchmark.py +++ b/python/hpc_benchmark/hpc_benchmark.py @@ -27,7 +27,7 @@ to NEST in the average firing rate of neurons. This script produces a balanced random network of `scale*11250` neurons -connected with static connections. The number of incoming connections +connected with static connections. The number of incoming connections per neuron is fixed and independent of network size (indegree=11250). Furthermore, the scale can be also increased through running the script @@ -76,18 +76,16 @@ arXiv. 2110.02883 """ +import json import os import sys -import json -import numpy as np -import scipy.special as sp -import matplotlib.pyplot as plt - +from argparse import ArgumentParser from time import perf_counter_ns +import matplotlib.pyplot as plt import nestgpu as ngpu - -from argparse import ArgumentParser +import numpy as np +import scipy.special as sp parser = ArgumentParser() parser.add_argument("--path", type=str, default=".") @@ -108,32 +106,33 @@ params = { - 'scale': 1., # scaling factor of the network size - # total network size = scale*11250 neurons - 'seed': args.seed, # seed for random number generation - 'simtime': 250., # total simulation time in ms - 'presimtime': 50., # simulation time until reaching equilibrium - 'dt': 0.1, # simulation step - 'stdp': False, # enable plastic connections [feature not properlyly implemented yet!] - 'record_spikes': False, # switch to record spikes of excitatory - # neurons to file - 'show_plot': False, # switch to show plot at the end of simulation - # disabled by default for benchmarking - 'raster_plot': False, # when record_spikes=True, depicts a raster plot - 'path_name': args.path, # path where all files will have to be written - 'log_file': 'log', # naming scheme for the log files - 'use_all_to_all': False, # Connect using all to all rule - 'check_conns': False, # Get ConnectionId objects after build. VERY SLOW! - 'use_dc_input': False, # Use DC input instead of Poisson generators - 'verbose_log': False, # Enable verbose output per MPI process + "scale": 1.0, # scaling factor of the network size + # total network size = scale*11250 neurons + "seed": args.seed, # seed for random number generation + "simtime": 250.0, # total simulation time in ms + "presimtime": 50.0, # simulation time until reaching equilibrium + "dt": 0.1, # simulation step + "stdp": False, # enable plastic connections [feature not properlyly implemented yet!] 
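# The temp_mpi.py example above spreads the remote indegree evenly over the other
# MPI ranks: every ordered pair of distinct ranks (i, j) is wired with RemoteConnect
# using fixed_indegree = (CE // 4) // (mpi_np - 1), so the remote excitatory indegree
# per neuron sums to roughly CE // 4 over all source ranks. A minimal sketch of that
# pattern follows, to be launched with mpirun on at least two ranks; the population
# size, indegree budget, weight and delay are placeholders, not values from the scripts.
import nestgpu as ngpu

ngpu.ConnectMpiInit()
mpi_np = ngpu.MpiNp()
mpi_id = ngpu.MpiId()
if mpi_np < 2:
    print("Run with: mpirun -np NP python <script>")
    quit()

n_neurons = 100  # placeholder population size per rank
CE = 80          # placeholder excitatory indegree budget

# One population per rank; RemoteCreate makes every population addressable
# from every rank.
neuron = []
for i in range(mpi_np):
    neuron.append(ngpu.RemoteCreate(i, "iaf_psc_alpha", n_neurons, 1).node_seq)

# Remote connections: divide the indegree budget among the other ranks.
re_conn_dict = {"rule": "fixed_indegree", "indegree": (CE // 4) // (mpi_np - 1)}
re_syn_dict = {"weight": 0.05, "delay": 1.0}

for i in range(mpi_np):
    for j in range(mpi_np):
        if i != j:
            ngpu.RemoteConnect(i, neuron[i], j, neuron[j], re_conn_dict, re_syn_dict)

ngpu.Simulate()
ngpu.MpiFinalize()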
+ "record_spikes": False, # switch to record spikes of excitatory + # neurons to file + "show_plot": False, # switch to show plot at the end of simulation + # disabled by default for benchmarking + "raster_plot": False, # when record_spikes=True, depicts a raster plot + "path_name": args.path, # path where all files will have to be written + "log_file": "log", # naming scheme for the log files + "use_all_to_all": False, # Connect using all to all rule + "check_conns": False, # Get ConnectionId objects after build. VERY SLOW! + "use_dc_input": False, # Use DC input instead of Poisson generators + "verbose_log": False, # Enable verbose output per MPI process } def rank_print(message): """Prints message and attaches MPI rank""" - if params['verbose_log']: + if params["verbose_log"]: print(f"MPI RANK {mpi_id}: {message}") + rank_print("Simulation with {} MPI processes".format(mpi_np)) @@ -156,10 +155,13 @@ def convert_synapse_weight(tau_m, tau_syn, C_m): b = 1.0 / tau_syn - 1.0 / tau_m t_rise = 1.0 / b * (-lambertwm1(-np.exp(-1.0 / a) / a).real - 1.0 / a) - v_max = np.exp(1.0) / (tau_syn * C_m * b) * ( - (np.exp(-t_rise / tau_m) - np.exp(-t_rise / tau_syn)) / - b - t_rise * np.exp(-t_rise / tau_syn)) - return 1. / v_max + v_max = ( + np.exp(1.0) + / (tau_syn * C_m * b) + * ((np.exp(-t_rise / tau_m) - np.exp(-t_rise / tau_syn)) / b - t_rise * np.exp(-t_rise / tau_syn)) + ) + return 1.0 / v_max + def dc_input_compensating_poisson(*args, **kwargs): """TEST FUNCTION @@ -167,6 +169,7 @@ def dc_input_compensating_poisson(*args, **kwargs): """ return 500.1 + ############################################################################### # For compatibility with earlier benchmarks, we require a rise time of # ``t_rise = 1.700759 ms`` and we choose ``tau_syn`` to achieve this for given @@ -179,93 +182,97 @@ def dc_input_compensating_poisson(*args, **kwargs): brunel_params = { - 'NE': int(9000 * params['scale']), # number of excitatory neurons - 'NI': int(2250 * params['scale']), # number of inhibitory neurons - - 'model_params': { # Set variables for iaf_psc_alpha - 'E_L': 0.0, # Resting membrane potential(mV) - 'C_m': 250.0, # Capacity of the membrane(pF) - 'tau_m': 10.0, # Membrane time constant(ms) - 't_ref': 0.5, # Duration of refractory period(ms) - 'Theta_rel': 20.0, # Threshold(mV) - 'V_reset_rel': 0.0, # Reset Potential(mV) + "NE": int(9000 * params["scale"]), # number of excitatory neurons + "NI": int(2250 * params["scale"]), # number of inhibitory neurons + "model_params": { # Set variables for iaf_psc_alpha + "E_L": 0.0, # Resting membrane potential(mV) + "C_m": 250.0, # Capacity of the membrane(pF) + "tau_m": 10.0, # Membrane time constant(ms) + "t_ref": 0.5, # Duration of refractory period(ms) + "Theta_rel": 20.0, # Threshold(mV) + "V_reset_rel": 0.0, # Reset Potential(mV) # time const. postsynaptic excitatory currents(ms) - 'tau_syn_ex': tau_syn, + "tau_syn_ex": tau_syn, # time const. postsynaptic inhibitory currents(ms) - 'tau_syn_in': tau_syn, + "tau_syn_in": tau_syn, #'tau_minus': 30.0, # time constant for STDP(depression) # V can be randomly initialized see below - 'V_m_rel': 0.0 #5.7 # mean value of membrane potential + "V_m_rel": 0.0, # 5.7 # mean value of membrane potential }, - #################################################################### # Note that Kunkel et al. (2014) report different values. The values # in the paper were used for the benchmarks on K, the values given # here were used for the benchmark on JUQUEEN. 
- - 'randomize_Vm': True, - 'mean_potential': 5.7, - 'sigma_potential': 7.2, - - 'delay': 1.5, # synaptic delay, all alpha connections(ms) - + "randomize_Vm": True, + "mean_potential": 5.7, + "sigma_potential": 7.2, + "delay": 1.5, # synaptic delay, all alpha connections(ms) # synaptic weight - 'JE': 0.14, # peak of EPSP - - 'sigma_w': 3.47, # standard dev. of E->E synapses(pA) - 'g': -5.0, - + "JE": 0.14, # peak of EPSP + "sigma_w": 3.47, # standard dev. of E->E synapses(pA) + "g": -5.0, # stdp synapses still to be implemented correctly - 'stdp_params': { - 'alpha': 0.0513, - 'lambda': 0.1, # STDP step size - 'mu_plus': 0.4, # STDP weight dependence exponent(potentiation) - 'mu_minus': 0.4, # STDP weight dependence exponent(depression) - 'tau_plus': 15.0, # time constant for potentiation - 'tau_minus': 15.0, # time constant for depression + "stdp_params": { + "alpha": 0.0513, + "lambda": 0.1, # STDP step size + "mu_plus": 0.4, # STDP weight dependence exponent(potentiation) + "mu_minus": 0.4, # STDP weight dependence exponent(depression) + "tau_plus": 15.0, # time constant for potentiation + "tau_minus": 15.0, # time constant for depression }, - 'stdp_delay': 1.5, - - 'eta': 1.685, # scaling of external stimulus - 'filestem': params['path_name'] + "stdp_delay": 1.5, + "eta": 1.685, # scaling of external stimulus + "filestem": params["path_name"], } ############################################################################### # Function Section + def build_network(): """Builds the network including setting of simulation and neuron parameters, creation of neurons and connections. Uses a dictionary to store information about the network construction times. - + Returns recorded neuron ids if spike recording is enabled, and the time dictionary. """ time_start = perf_counter_ns() # start timer on construction # unpack a few variables for convenience - NE = brunel_params['NE'] - NI = brunel_params['NI'] - model_params = brunel_params['model_params'] - stdp_params = brunel_params['stdp_params'] + NE = brunel_params["NE"] + NI = brunel_params["NI"] + model_params = brunel_params["model_params"] + stdp_params = brunel_params["stdp_params"] - rank_print('Creating neuron populations.') + rank_print("Creating neuron populations.") - neurons = []; E_pops = []; I_pops = [] + neurons = [] + E_pops = [] + I_pops = [] for i in range(mpi_np): - neurons.append(ngpu.RemoteCreate(i, 'iaf_psc_alpha', NE+NI, 1, model_params).node_seq) + neurons.append(ngpu.RemoteCreate(i, "iaf_psc_alpha", NE + NI, 1, model_params).node_seq) E_pops.append(neurons[i][0:NE]) - I_pops.append(neurons[i][NE:NE+NI]) - - if brunel_params['randomize_Vm']: - rank_print('Randomizing membrane potentials.') - ngpu.SetStatus(neurons[mpi_id], {"V_m_rel": {"distribution": "normal", "mu": brunel_params['mean_potential'], "sigma": brunel_params['sigma_potential']}}) + I_pops.append(neurons[i][NE : NE + NI]) + + if brunel_params["randomize_Vm"]: + rank_print("Randomizing membrane potentials.") + ngpu.SetStatus( + neurons[mpi_id], + { + "V_m_rel": { + "distribution": "normal", + "mu": brunel_params["mean_potential"], + "sigma": brunel_params["sigma_potential"], + } + }, + ) # total number of incoming excitatory connections - CE = int(1. * NE / params['scale']) + CE = int(1.0 * NE / params["scale"]) # total number of incomining inhibitory connections - CI = int(1. 
* NI / params['scale']) + CI = int(1.0 * NI / params["scale"]) # number of indegrees from each MPI process # here the indegrees are equally distributed among the @@ -274,31 +281,32 @@ def build_network(): CE_distrib = int(1.0 * CE / (mpi_np)) CI_distrib = int(1.0 * CI / (mpi_np)) - rank_print('Creating excitatory stimulus generator.') + rank_print("Creating excitatory stimulus generator.") # Convert synapse weight from mV to pA - conversion_factor = convert_synapse_weight(model_params['tau_m'], model_params['tau_syn_ex'], model_params['C_m']) - JE_pA = conversion_factor * brunel_params['JE'] - - nu_thresh = model_params['Theta_rel'] / ( CE * model_params['tau_m'] / model_params['C_m'] * JE_pA * np.exp(1.) * tau_syn) - nu_ext = nu_thresh * brunel_params['eta'] - rate = nu_ext * CE * 1000. - if not params['use_dc_input']: + conversion_factor = convert_synapse_weight(model_params["tau_m"], model_params["tau_syn_ex"], model_params["C_m"]) + JE_pA = conversion_factor * brunel_params["JE"] + + nu_thresh = model_params["Theta_rel"] / ( + CE * model_params["tau_m"] / model_params["C_m"] * JE_pA * np.exp(1.0) * tau_syn + ) + nu_ext = nu_thresh * brunel_params["eta"] + rate = nu_ext * CE * 1000.0 + if not params["use_dc_input"]: brunel_params["poisson_rate"] = rate - E_stim= ngpu.Create('poisson_generator', 1, 1, {'rate': rate}) + E_stim = ngpu.Create("poisson_generator", 1, 1, {"rate": rate}) else: - inh_amp = dc_input_compensating_poisson(rate, CI, tau_syn, brunel_params['g'] * JE_pA) + inh_amp = dc_input_compensating_poisson(rate, CI, tau_syn, brunel_params["g"] * JE_pA) ex_amp = dc_input_compensating_poisson(rate, CE, tau_syn, JE_pA) brunel_params["DC_amp_I"] = inh_amp brunel_params["DC_amp_E"] = ex_amp ngpu.SetStatus(I_pops[mpi_id], {"I_e": inh_amp}) ngpu.SetStatus(E_pops[mpi_id], {"I_e": ex_amp}) + rank_print("Creating excitatory spike recorder.") - rank_print('Creating excitatory spike recorder.') - - if params['record_spikes']: - recorder_label = 'alpha_' + str(stdp_params['alpha']) + '_spikes_' + str(mpi_id) + if params["record_spikes"]: + recorder_label = "alpha_" + str(stdp_params["alpha"]) + "_spikes_" + str(mpi_id) brunel_params["recorder_label"] = recorder_label ngpu.ActivateRecSpikeTimes(neurons[mpi_id], 1000) record = ngpu.CreateRecord("", ["V_m_rel"], [neurons[mpi_id][0]], [0]) @@ -306,59 +314,65 @@ def build_network(): time_create = perf_counter_ns() syn_dict_ex = None - syn_dict_in = {'weight': brunel_params['g'] * JE_pA, 'delay': brunel_params['delay']} - if params['stdp']: - syn_group_stdp = ngpu.CreateSynGroup('stdp', stdp_params) - syn_dict_ex = {"weight": JE_pA, "delay": brunel_params['stdp_delay'], "synapse_group": syn_group_stdp} + syn_dict_in = { + "weight": brunel_params["g"] * JE_pA, + "delay": brunel_params["delay"], + } + if params["stdp"]: + syn_group_stdp = ngpu.CreateSynGroup("stdp", stdp_params) + syn_dict_ex = { + "weight": JE_pA, + "delay": brunel_params["stdp_delay"], + "synapse_group": syn_group_stdp, + } else: - syn_dict_ex = {'weight': JE_pA, 'delay': brunel_params['delay']} + syn_dict_ex = {"weight": JE_pA, "delay": brunel_params["delay"]} - if mpi_id==0: - rank_print("Synaptic weights: JE={}; JI={}".format(JE_pA, JE_pA*brunel_params['g'])) + if mpi_id == 0: + rank_print("Synaptic weights: JE={}; JI={}".format(JE_pA, JE_pA * brunel_params["g"])) if not params["use_dc_input"]: - rank_print('Connecting stimulus generators.') + rank_print("Connecting stimulus generators.") # connect Poisson generator to neuron - my_connect(E_stim, neurons[mpi_id], {'rule': 
'all_to_all'}, syn_dict_ex) + my_connect(E_stim, neurons[mpi_id], {"rule": "all_to_all"}, syn_dict_ex) - rank_print('Creating local connections.') - rank_print('Connecting excitatory -> excitatory population.') + rank_print("Creating local connections.") + rank_print("Connecting excitatory -> excitatory population.") - if params['use_all_to_all']: - i_conn_rule = {'rule': 'all_to_all'} - e_conn_rule = {'rule': 'all_to_all'} + if params["use_all_to_all"]: + i_conn_rule = {"rule": "all_to_all"} + e_conn_rule = {"rule": "all_to_all"} else: - i_conn_rule = {'rule': 'fixed_indegree', 'indegree': CI_distrib} - e_conn_rule = {'rule': 'fixed_indegree', 'indegree': CE_distrib} + i_conn_rule = {"rule": "fixed_indegree", "indegree": CI_distrib} + e_conn_rule = {"rule": "fixed_indegree", "indegree": CE_distrib} + + brunel_params["connection_rules"] = { + "inhibitory": i_conn_rule, + "excitatory": e_conn_rule, + } + + my_connect(E_pops[mpi_id], neurons[mpi_id], e_conn_rule, syn_dict_ex) - brunel_params["connection_rules"] = {"inhibitory": i_conn_rule, "excitatory": e_conn_rule} - - my_connect(E_pops[mpi_id], neurons[mpi_id], - e_conn_rule, syn_dict_ex) + my_connect(I_pops[mpi_id], neurons[mpi_id], i_conn_rule, syn_dict_in) - my_connect(I_pops[mpi_id], neurons[mpi_id], - i_conn_rule, syn_dict_in) - time_connect_local = perf_counter_ns() - rank_print('Creating remote connections.') - + rank_print("Creating remote connections.") + for i in range(mpi_np): for j in range(mpi_np): - if(i!=j): - rank_print('Connecting excitatory {} -> excitatory {} population.'.format(i, j)) + if i != j: + rank_print("Connecting excitatory {} -> excitatory {} population.".format(i, j)) + + my_remoteconnect(i, E_pops[i], j, neurons[j], e_conn_rule, syn_dict_ex) - my_remoteconnect(i, E_pops[i], j, neurons[j], - e_conn_rule, syn_dict_ex) + rank_print("Connecting inhibitory {} -> excitatory {} population.".format(i, j)) - rank_print('Connecting inhibitory {} -> excitatory {} population.'.format(i, j)) - - my_remoteconnect(i, I_pops[i], j, neurons[j], - i_conn_rule, syn_dict_in) + my_remoteconnect(i, I_pops[i], j, neurons[j], i_conn_rule, syn_dict_in) - rank_print('Connecting excitatory {} -> inhibitory {} population.'.format(i, j)) + rank_print("Connecting excitatory {} -> inhibitory {} population.".format(i, j)) - rank_print('Connecting inhibitory {} -> inhibitory {} population.'.format(i, j)) + rank_print("Connecting inhibitory {} -> inhibitory {} population.".format(i, j)) # read out time used for building time_connect_remote = perf_counter_ns() @@ -367,11 +381,11 @@ def build_network(): "time_create": time_create - time_start, "time_connect_local": time_connect_local - time_create, "time_connect_remote": time_connect_remote - time_connect_local, - "time_connect": time_connect_remote - time_create + "time_connect": time_connect_remote - time_create, } conns = None - if params['check_conns']: + if params["check_conns"]: conns = dict() for i in range(mpi_np): if mpi_id == i: @@ -389,7 +403,12 @@ def build_network(): time_dict["time_check_connect"] = time_check_connect - time_connect_remote - return neurons[mpi_id], record if params['record_spikes'] else None, conns, time_dict + return ( + neurons[mpi_id], + record if params["record_spikes"] else None, + conns, + time_dict, + ) def run_simulation(): @@ -397,13 +416,15 @@ def run_simulation(): time_start = perf_counter_ns() - ngpu.SetKernelStatus({ - "verbosity_level": 4, - "rnd_seed": params["seed"], - "time_resolution": params['dt'] - }) + ngpu.SetKernelStatus( + { + 
"verbosity_level": 4, + "rnd_seed": params["seed"], + "time_resolution": params["dt"], + } + ) seed = ngpu.GetKernelStatus("rnd_seed") - + time_initialize = perf_counter_ns() neurons, record, conns, time_dict = build_network() @@ -414,57 +435,54 @@ def run_simulation(): time_calibrate = perf_counter_ns() - ngpu.Simulate(params['presimtime']) + ngpu.Simulate(params["presimtime"]) time_presimulate = perf_counter_ns() - ngpu.Simulate(params['simtime']) + ngpu.Simulate(params["simtime"]) time_simulate = perf_counter_ns() - time_dict.update({ + time_dict.update( + { "time_initialize": time_initialize - time_start, "time_construct": time_construct - time_initialize, "time_calibrate": time_calibrate - time_construct, "time_presimulate": time_presimulate - time_calibrate, "time_simulate": time_simulate - time_presimulate, - "time_total": time_simulate - time_start - }) + "time_total": time_simulate - time_start, + } + ) conf_dict = { "num_processes": mpi_np, "brunel_params": brunel_params, - "simulation_params": params + "simulation_params": params, } - info_dict = { - "rank": mpi_id, - "seed": seed, - "conf": conf_dict, - "timers": time_dict - } + info_dict = {"rank": mpi_id, "seed": seed, "conf": conf_dict, "timers": time_dict} - if params['record_spikes']: - e_stats, e_data, i_stats, i_data= get_spike_times(neurons) + if params["record_spikes"]: + e_stats, e_data, i_stats, i_data = get_spike_times(neurons) e_rate = compute_rate(*e_stats) i_rate = compute_rate(*i_stats) info_dict["stats"] = { "excitatory_firing_rate": e_rate, - "inhibitory_firing_rate": i_rate + "inhibitory_firing_rate": i_rate, } - - if params['show_plot']: + + if params["show_plot"]: recorded_data = ngpu.GetRecordData(record) time = [row[0] for row in recorded_data] V_m = [row[1] for row in recorded_data] plt.figure(mpi_id) - plt.plot(time, V_m, '-r') + plt.plot(time, V_m, "-r") plt.draw() - if params['raster_plot']: + if params["raster_plot"]: raster_plot(e_data, i_data) - if params['check_conns']: - with open(os.path.join(params['path_name'], f"connections_{mpi_id}.json"), 'w') as f: + if params["check_conns"]: + with open(os.path.join(params["path_name"], f"connections_{mpi_id}.json"), "w") as f: json.dump(conns, f, indent=4) k_status = ngpu.GetKernelStatus() @@ -472,16 +490,21 @@ def run_simulation(): rank_print(json.dumps(info_dict, indent=4)) - with open(os.path.join(params['path_name'], params['log_file'] + f"_{mpi_id}.json"), 'w') as f: + with open(os.path.join(params["path_name"], params["log_file"] + f"_{mpi_id}.json"), "w") as f: json.dump(info_dict, f, indent=4) + def my_connect(source, target, conn_dict, syn_dict): rank_print("MY id {} LOCAL Source {} {} | Target {} {}".format(mpi_id, source.i0, source.n, target.i0, target.n)) ngpu.Connect(source, target, conn_dict, syn_dict) def my_remoteconnect(source_host, source, target_host, target, conn_dict, syn_dict): - rank_print("MY id {} REMOTE Source {} {} {} | Target {} {} {}".format(mpi_id, source_host, source.i0, source.n, target_host, target.i0, target.n)) + rank_print( + "MY id {} REMOTE Source {} {} {} | Target {} {} {}".format( + mpi_id, source_host, source.i0, source.n, target_host, target.i0, target.n + ) + ) ngpu.RemoteConnect(source_host, source, target_host, target, conn_dict, syn_dict) @@ -503,36 +526,41 @@ def get_spike_times(neurons): # select excitatory neurons e_count = 0 e_data = [] - e_bound = brunel_params['NE'] + e_bound = brunel_params["NE"] i_count = 0 i_data = [] - i_bound = brunel_params['NE'] + brunel_params['NI'] + i_bound = 
brunel_params["NE"] + brunel_params["NI"] for i_neur in range(i_bound): spikes = spike_times[i_neur] if len(spikes) != 0: if i_neur < e_bound: for t in spikes: - if t > params['presimtime']: + if t > params["presimtime"]: e_count += 1 e_data.append([i_neur, t]) else: for t in spikes: - if t > params['presimtime']: + if t > params["presimtime"]: i_count += 1 i_data.append([i_neur, t]) # Save data if len(e_data) > 0: e_array = np.array(e_data) - e_fn = os.path.join(brunel_params['filestem'], brunel_params["recorder_label"] + "_e_pop.dat") - np.savetxt(e_fn, e_array, fmt='%d\t%.3f', header="sender time_ms", comments='') + e_fn = os.path.join(brunel_params["filestem"], brunel_params["recorder_label"] + "_e_pop.dat") + np.savetxt(e_fn, e_array, fmt="%d\t%.3f", header="sender time_ms", comments="") if len(i_data) > 0: i_array = np.array(i_data) - i_fn = os.path.join(brunel_params['filestem'], brunel_params["recorder_label"] + "_i_pop.dat") - np.savetxt(i_fn, i_array, fmt='%d\t%.3f', header="sender time_ms", comments='') + i_fn = os.path.join(brunel_params["filestem"], brunel_params["recorder_label"] + "_i_pop.dat") + np.savetxt(i_fn, i_array, fmt="%d\t%.3f", header="sender time_ms", comments="") - return (brunel_params['NE'], e_count), e_data, (brunel_params['NI'], i_count), i_data + return ( + (brunel_params["NE"], e_count), + e_data, + (brunel_params["NI"], i_count), + i_data, + ) def compute_rate(num_neurons, spike_count): @@ -544,34 +572,41 @@ def compute_rate(num_neurons, spike_count): if spike_count < 1: return 0 - time_frame = params['simtime'] + time_frame = params["simtime"] + + return 1.0 * spike_count / (num_neurons * time_frame) * 1e3 - return (1. * spike_count / (num_neurons * time_frame) * 1e3) def raster_plot(e_st, i_st): - fs = 18 # fontsize - colors = ['#595289', '#af143c'] - e_ids = np.zeros(len(e_st)); i_ids = np.zeros(len(i_st)) - e_times = np.zeros(len(e_st)); i_times = np.zeros(len(i_st)) + fs = 18 # fontsize + colors = ["#595289", "#af143c"] + e_ids = np.zeros(len(e_st)) + i_ids = np.zeros(len(i_st)) + e_times = np.zeros(len(e_st)) + i_times = np.zeros(len(i_st)) for i in range(len(e_st)): - e_ids[i]=e_st[i][0] - e_times[i]=e_st[i][1] + e_ids[i] = e_st[i][0] + e_times[i] = e_st[i][1] for i in range(len(i_st)): - i_ids[i]=i_st[i][0] - i_times[i]=i_st[i][1] - + i_ids[i] = i_st[i][0] + i_times[i] = i_st[i][1] + plt.figure(1, figsize=(16, 10)) - plt.plot(e_times, e_ids, '.', color=colors[0]) - plt.plot(i_times, i_ids, '.', color=colors[1]) - plt.xlabel('time [ms]', fontsize=fs) - plt.ylabel('neuron ID', fontsize=fs) + plt.plot(e_times, e_ids, ".", color=colors[0]) + plt.plot(i_times, i_ids, ".", color=colors[1]) + plt.xlabel("time [ms]", fontsize=fs) + plt.ylabel("neuron ID", fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(fontsize=fs) plt.tight_layout() - plt.savefig(os.path.join(brunel_params['filestem'], 'raster_plot'+ str(mpi_id) +'.png'), dpi=300) + plt.savefig( + os.path.join(brunel_params["filestem"], "raster_plot" + str(mpi_id) + ".png"), + dpi=300, + ) + -if __name__ == '__main__': +if __name__ == "__main__": run_simulation() - if params['show_plot']: + if params["show_plot"]: plt.show() ngpu.MpiFinalize() diff --git a/python/test/err.py b/python/test/err.py index 43684a213..0006ee404 100644 --- a/python/test/err.py +++ b/python/test/err.py @@ -1,19 +1,20 @@ import sys + import nestgpu as ngpu tolerance = 1.0e-6 neuron = ngpu.Create("aeif_cond_beta", 3) -ngpu.SetStatus(neuron, {"I_e":1000.0}) +ngpu.SetStatus(neuron, {"I_e": 1000.0}) spike_det = 
ngpu.Create("spike_detector") -conn_dict={"rule": "one_to_one"} -syn_dict1={"weight": 1.0, "delay": 10.0, "receptor":0} -syn_dict2={"weight": 2.0, "delay": 20.0, "receptor":0} -syn_dict3={"weight": 3.0, "delay": 30.0, "receptor":0} +conn_dict = {"rule": "one_to_one"} +syn_dict1 = {"weight": 1.0, "delay": 10.0, "receptor": 0} +syn_dict2 = {"weight": 2.0, "delay": 20.0, "receptor": 0} +syn_dict3 = {"weight": 3.0, "delay": 30.0, "receptor": 0} ngpu.Connect(neuron[0:0], spike_det, conn_dict, syn_dict1) @@ -28,38 +29,38 @@ ngpu.Simulate() data_n = ngpu.GetRecordData(record_n) -t_n=[row[0] for row in data_n] -spike_n=[row[1] for row in data_n] +t_n = [row[0] for row in data_n] +spike_n = [row[1] for row in data_n] data_sd = ngpu.GetRecordData(record_sd) -t_sd=[row[0] for row in data_sd] -spike_sd=[row[1] for row in data_sd] +t_sd = [row[0] for row in data_sd] +spike_sd = [row[1] for row in data_sd] -for i in range(len(t_n)-400): - if spike_n[i]>0.5: +for i in range(len(t_n) - 400): + if spike_n[i] > 0.5: j1 = i + 101 j2 = i + 201 j3 = i + 301 - if abs(spike_sd[j1] - 1.0)>tolerance: + if abs(spike_sd[j1] - 1.0) > tolerance: print("Expected spike height: 1.0, simulated: ", spike_sd[j1]) sys.exit(1) - if abs(spike_sd[j2] - 2.0)>tolerance: + if abs(spike_sd[j2] - 2.0) > tolerance: print("Expected spike height: 2.0, simulated: ", spike_sd[j2]) sys.exit(1) - if abs(spike_sd[j3] - 3.0)>tolerance: + if abs(spike_sd[j3] - 3.0) > tolerance: print("Expected spike height: 3.0, simulated: ", spike_sd[j3]) sys.exit(1) - -#import matplotlib.pyplot as plt -#plt.figure(1) -#plt.plot(t_n, spike_n) +# import matplotlib.pyplot as plt + +# plt.figure(1) +# plt.plot(t_n, spike_n) -#plt.figure(2) -#plt.plot(t_sd, spike_sd) +# plt.figure(2) +# plt.plot(t_sd, spike_sd) -#plt.draw() -#plt.pause(1) -#raw_input("<Hit Enter To Close>") -#plt.close() +# plt.draw() +# plt.pause(1) +# raw_input("<Hit Enter To Close>") +# plt.close() sys.exit(1) diff --git a/python/test/err_mpi.py b/python/test/err_mpi.py index 98cd0397b..3b9d60543 100644 --- a/python/test/err_mpi.py +++ b/python/test/err_mpi.py @@ -1,16 +1,16 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange -import numpy as np +import nestgpu as ngpu +import numpy as np -ngpu.ConnectMpiInit(); +ngpu.ConnectMpiInit() mpi_np = ngpu.MpiNp() if mpi_np != 2: - print ("Usage: mpirun -np 2 python %s" % sys.argv[0]) + print("Usage: mpirun -np 2 python %s" % sys.argv[0]) quit() order = 100 @@ -21,24 +21,24 @@ mpi_id = ngpu.MpiId() print("Building on host ", mpi_id, " ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -47,8 +47,8 
@@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -57,8 +57,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -67,36 +66,49 @@ # Excitatory local connections, defined on all hosts # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and fixed indegree CE//2 -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE//2} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE // 2} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory local connections, defined on all hosts # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and fixed indegree CI//2 -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI//2} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI // 2} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, exc_neuron_list, inh_conn_dict, inh_syn_dict) ngpu.Connect(inh_neuron_list, inh_neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -112,8 +124,8 @@ # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE//2 # host 0 to host 1 -re_conn_dict={"rule": "fixed_indegree", "indegree": CE//2} -re_syn_dict=exc_syn_dict +re_conn_dict = {"rule": "fixed_indegree", "indegree": CE // 2} +re_syn_dict = exc_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, exc_neuron_list, 1, neuron, re_conn_dict, re_syn_dict) # host 1 to host 0 @@ -123,8 +135,8 @@ # connect inhibitory neurons to port 1 of all neurons # weight 
Win and fixed indegree CI//2 # host 0 to host 1 -ri_conn_dict={"rule": "fixed_indegree", "indegree": CI//2} -ri_syn_dict=inh_syn_dict +ri_conn_dict = {"rule": "fixed_indegree", "indegree": CI // 2} +ri_syn_dict = inh_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, inh_neuron, 1, neuron, ri_conn_dict, ri_syn_dict) # host 1 to host 0 @@ -135,53 +147,50 @@ data_list = ngpu.GetRecordData(record) for i in range(500): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=2*(NE+NI)+1): - print("Expected number of in connections per neuron: ", 2*(NE+NI)+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != 2 * (NE + NI) + 1: + print("Expected number of in connections per neuron: ", 2 * (NE + NI) + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: sys.exit(1) - diff --git a/python/test/logp3_connect.txt b/python/test/logp3_connect.txt index ed637ff5d..b460b0622 100644 --- a/python/test/logp3_connect.txt +++ b/python/test/logp3_connect.txt @@ -121,5 +121,3 @@ Even to 3,4,5,6 {'source': 6, 'target': 5, 'port': 0, 'syn': 0, 'delay': 65.0, 'weight': 6500.0} {'source': 6, 'target': 7, 'port': 0, 'syn': 0, 'delay': 67.0, 'weight': 6700.0} {'source': 6, 'target': 9, 'port': 0, 'syn': 0, 'delay': 69.0, 'weight': 6900.0} - - diff --git a/python/test/nest_iaf_psc_alpha.py b/python/test/nest_iaf_psc_alpha.py index a2c870c68..563c3ae44 100644 --- a/python/test/nest_iaf_psc_alpha.py +++ b/python/test/nest_iaf_psc_alpha.py @@ -1,9 +1,10 @@ import sys + import nest import numpy as np tolerance = 0.00005 -neuron = nest.Create('iaf_psc_alpha') +neuron = nest.Create("iaf_psc_alpha") spike = nest.Create("spike_generator") spike_times = 
[10.0, 400.0] n_spikes = 2 @@ -16,52 +17,53 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} nest.Connect(spike, neuron, conn_spec, syn_spec_ex) nest.Connect(spike, neuron, conn_spec, syn_spec_in) -#record = nest.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -voltmeter = nest.Create('voltmeter') +# record = nest.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) +voltmeter = nest.Create("voltmeter") nest.Connect(voltmeter, neuron) nest.Simulate(800.0) -#data_list = nest.GetRecordData(record) -#t=[row[0] for row in data_list] -#V_m=[-70.0+row[1] for row in data_list] +# data_list = nest.GetRecordData(record) +# t=[row[0] for row in data_list] +# V_m=[-70.0+row[1] for row in data_list] -#import matplotlib.pyplot as plt -#plt.figure() -#plt.plot(t, V_m, "r-") -#plt.show() +# import matplotlib.pyplot as plt +# plt.figure() +# plt.plot(t, V_m, "r-") +# plt.show() -#sys.exit() +# sys.exit() dmm = nest.GetStatus(voltmeter)[0] V_m = dmm["events"]["V_m"] t = dmm["events"]["times"] -with open('test_iaf_psc_alpha_nest.txt', 'w') as f: +with open("test_iaf_psc_alpha_nest.txt", "w") as f: for i in range(len(t)): f.write("%s\t%s\n" % (t[i], V_m[i])) import matplotlib.pyplot as plt + plt.figure() plt.plot(t, V_m, "r-") plt.show() sys.exit() -data = np.loadtxt('test_iaf_psc_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) - -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +data = np.loadtxt("test_iaf_psc_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) + +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/parrot_neuron.py b/python/test/parrot_neuron.py index 9dfe7b949..284727c15 100644 --- a/python/test/parrot_neuron.py +++ b/python/test/parrot_neuron.py @@ -4,45 +4,43 @@ neuron0 = neuron[0:0] neuron1 = neuron[1:1] -ngpu.SetStatus(neuron0, {"I_e":1000.0}) +ngpu.SetStatus(neuron0, {"I_e": 1000.0}) parrot = ngpu.Create("parrot_neuron", 2) parrot0 = parrot[0:0] parrot1 = parrot[1:1] -conn_dict={"rule": "one_to_one"} -syn_dict0={"weight": 0.5, "delay": 1.0, "receptor":0} +conn_dict = {"rule": "one_to_one"} +syn_dict0 = {"weight": 0.5, "delay": 1.0, "receptor": 0} ngpu.Connect(neuron0, parrot0, conn_dict, syn_dict0) -syn_dict1={"weight": 0.1, "delay": 1.0, "receptor":0} +syn_dict1 = {"weight": 0.1, "delay": 1.0, "receptor": 0} ngpu.Connect(parrot0, neuron1, conn_dict, syn_dict1) ngpu.Connect(parrot0, parrot1, conn_dict, syn_dict1) neuron0_record = ngpu.CreateRecord("", ["V_m"], [neuron0[0]], [0]) -parrot0_record = ngpu.CreateRecord("", ["V", "spike"], [parrot0[0], parrot0[0]], - [0, 0]) +parrot0_record = ngpu.CreateRecord("", ["V", "spike"], [parrot0[0], parrot0[0]], [0, 0]) neuron1_record = ngpu.CreateRecord("", ["g1"], [neuron1[0]], [0]) -parrot1_record = ngpu.CreateRecord("", ["V", "spike"], [parrot1[0], parrot1[0]], - [0, 0]) +parrot1_record = ngpu.CreateRecord("", ["V", "spike"], [parrot1[0], parrot1[0]], [0, 0]) ngpu.Simulate() neuron0_data_list = 
ngpu.GetRecordData(neuron0_record) -t_neuron0=[row[0] for row in neuron0_data_list] -V_m=[row[1] for row in neuron0_data_list] +t_neuron0 = [row[0] for row in neuron0_data_list] +V_m = [row[1] for row in neuron0_data_list] parrot0_data_list = ngpu.GetRecordData(parrot0_record) -t_parrot0=[row[0] for row in parrot0_data_list] -V_parrot0=[row[1] for row in parrot0_data_list] -spike_parrot0=[row[2] for row in parrot0_data_list] +t_parrot0 = [row[0] for row in parrot0_data_list] +V_parrot0 = [row[1] for row in parrot0_data_list] +spike_parrot0 = [row[2] for row in parrot0_data_list] neuron1_data_list = ngpu.GetRecordData(neuron1_record) -t_neuron1=[row[0] for row in neuron1_data_list] -g1=[row[1] for row in neuron1_data_list] +t_neuron1 = [row[0] for row in neuron1_data_list] +g1 = [row[1] for row in neuron1_data_list] parrot1_data_list = ngpu.GetRecordData(parrot1_record) -t_parrot1=[row[0] for row in parrot1_data_list] -V_parrot1=[row[1] for row in parrot1_data_list] -spike_parrot1=[row[2] for row in parrot1_data_list] +t_parrot1 = [row[0] for row in parrot1_data_list] +V_parrot1 = [row[1] for row in parrot1_data_list] +spike_parrot1 = [row[2] for row in parrot1_data_list] import matplotlib.pyplot as plt diff --git a/python/test/plot_aeif_psc_delta_multisynapse.py b/python/test/plot_aeif_psc_delta_multisynapse.py index 44ded7fd2..d73cd1eb2 100644 --- a/python/test/plot_aeif_psc_delta_multisynapse.py +++ b/python/test/plot_aeif_psc_delta_multisynapse.py @@ -1,9 +1,12 @@ import sys + import nestgpu as ngpu -neuron = ngpu.Create('aeif_psc_delta_multisynapse') -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "C_m":20000.0}) +neuron = ngpu.Create("aeif_psc_delta_multisynapse") +ngpu.SetStatus( + neuron, + {"V_peak": 0.0, "a": 4.0, "b": 80.5, "E_L": -70.6, "g_L": 300.0, "C_m": 20000.0}, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -13,26 +16,26 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] import matplotlib.pyplot as plt diff --git a/python/test/test_aeif_cond_alpha.py b/python/test/test_aeif_cond_alpha.py index de328891f..07e6430bf 100644 --- a/python/test/test_aeif_cond_alpha.py +++ b/python/test/test_aeif_cond_alpha.py @@ -1,11 +1,24 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.00005 -neuron = ngpu.Create('aeif_cond_alpha', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, 'E_rev_ex':20.0, 'E_rev_in': -85.0, - 'tau_syn_ex':40.0, 'tau_syn_in': 20.0}) +neuron = 
ngpu.Create("aeif_cond_alpha", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev_ex": 20.0, + "E_rev_in": -85.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] @@ -16,9 +29,9 @@ delay = [1.0, 100.0] weight = [0.1, 0.2] -conn_spec={"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +conn_spec = {"rule": "all_to_all"} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) @@ -27,14 +40,14 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] -data = np.loadtxt('test_aeif_cond_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_cond_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) """ import matplotlib.pyplot as plt @@ -44,11 +57,11 @@ plt.show() """ -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -#print(dV) -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +# print(dV) +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_cond_alpha_multisynapse.py b/python/test/test_aeif_cond_alpha_multisynapse.py index 2773f66b5..7a4cf56b2 100644 --- a/python/test/test_aeif_cond_alpha_multisynapse.py +++ b/python/test/test_aeif_cond_alpha_multisynapse.py @@ -1,11 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_cond_alpha_multisynapse', 1, 3) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, 'E_rev':[20.0, 0.0, -85.0], \ - 'tau_syn':[40.0, 20.0, 30.0]}) +neuron = ngpu.Create("aeif_cond_alpha_multisynapse", 1, 3) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev": [20.0, 0.0, -85.0], + "tau_syn": [40.0, 20.0, 30.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -15,37 +26,37 @@ delay = [1.0, 100.0, 130.0] weight = [0.1, 0.2, 0.5] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_cond_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = 
nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_cond_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_cond_alpha_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_cond_alpha_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_cond_beta.py b/python/test/test_aeif_cond_beta.py index b50dca15c..b877ba562 100644 --- a/python/test/test_aeif_cond_beta.py +++ b/python/test/test_aeif_cond_beta.py @@ -1,14 +1,26 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.00005 -neuron = ngpu.Create('aeif_cond_beta', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, "g_L":300.0, - 'E_rev_ex': 20.0, 'E_rev_in': -85.0, - 'tau_decay_ex': 40.0, - 'tau_decay_in': 20.0, - 'tau_rise_ex': 20.0, - 'tau_rise_in': 5.0}) +neuron = ngpu.Create("aeif_cond_beta", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev_ex": 20.0, + "E_rev_in": -85.0, + "tau_decay_ex": 40.0, + "tau_decay_in": 20.0, + "tau_rise_ex": 20.0, + "tau_rise_in": 5.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -18,9 +30,9 @@ delay = [1.0, 100.0] weight = [0.1, 0.2] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(2): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) @@ -28,14 +40,14 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] -data = np.loadtxt('test_aeif_cond_beta_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_cond_beta_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) """ import matplotlib.pyplot as plt @@ -45,10 +57,10 @@ plt.show() """ -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_cond_beta_multisynapse.py b/python/test/test_aeif_cond_beta_multisynapse.py index e01c60119..528fa3090 100644 --- a/python/test/test_aeif_cond_beta_multisynapse.py +++ b/python/test/test_aeif_cond_beta_multisynapse.py @@ -1,12 +1,23 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_cond_beta_multisynapse', 1, 3) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, 
"E_L":-70.6, - "g_L":300.0, 'E_rev':[20.0, 0.0, -85.0], \ - 'tau_decay':[40.0, 20.0, 30.0], \ - 'tau_rise':[20.0, 10.0, 5.0]}) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 1, 3) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "E_rev": [20.0, 0.0, -85.0], + "tau_decay": [40.0, 20.0, 30.0], + "tau_rise": [20.0, 10.0, 5.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -16,37 +27,37 @@ delay = [1.0, 100.0, 130.0] weight = [0.1, 0.2, 0.5] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_cond_beta_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_cond_beta_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_cond_beta_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_cond_beta_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_psc_alpha.py b/python/test/test_aeif_psc_alpha.py index 53b6e44fc..302ceb2b1 100644 --- a/python/test/test_aeif_psc_alpha.py +++ b/python/test/test_aeif_psc_alpha.py @@ -1,12 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 3e-6 -neuron = ngpu.Create('aeif_psc_alpha', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, - "g_L":300.0, - "tau_syn_ex": 40.0, - "tau_syn_in": 20.0}) +neuron = ngpu.Create("aeif_psc_alpha", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -16,39 +26,39 @@ delay = [1.0, 100.0] weight = [1.0, 2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], 
[neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_psc_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_psc_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_psc_alpha_multisynapse.py b/python/test/test_aeif_psc_alpha_multisynapse.py index 2d8d5b32f..4a3ad6f61 100644 --- a/python/test/test_aeif_psc_alpha_multisynapse.py +++ b/python/test/test_aeif_psc_alpha_multisynapse.py @@ -1,10 +1,21 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_alpha_multisynapse', 1, 2) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "tau_syn":[40.0, 20.0]}) +neuron = ngpu.Create("aeif_psc_alpha_multisynapse", 1, 2) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn": [40.0, 20.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -14,40 +25,40 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_alpha_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_psc_alpha_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for 
x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_psc_alpha_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_psc_delta.py b/python/test/test_aeif_psc_delta.py index 8e4d227b4..2f561da22 100644 --- a/python/test/test_aeif_psc_delta.py +++ b/python/test/test_aeif_psc_delta.py @@ -1,10 +1,14 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_delta') -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "C_m":20000.0}) +neuron = ngpu.Create("aeif_psc_delta") +ngpu.SetStatus( + neuron, + {"V_peak": 0.0, "a": 4.0, "b": 80.5, "E_L": -70.6, "g_L": 300.0, "C_m": 20000.0}, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -15,40 +19,40 @@ # the aeif_psc_delta model has one port, negative inputs require negative weights weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_delta_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_delta_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_psc_delta_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_psc_delta_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_psc_exp.py b/python/test/test_aeif_psc_exp.py index b03394378..3fcd703b0 100644 --- a/python/test/test_aeif_psc_exp.py +++ b/python/test/test_aeif_psc_exp.py @@ -1,13 +1,22 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 5e-6 -neuron = ngpu.Create('aeif_psc_exp', 1) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, - "E_L":-70.6, - "g_L":300.0, - "tau_syn_ex": 40.0, - "tau_syn_in": 20.0}) 
+neuron = ngpu.Create("aeif_psc_exp", 1) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn_ex": 40.0, + "tau_syn_in": 20.0, + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -17,40 +26,40 @@ delay = [1.0, 100.0] weight = [1.0, 2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_aeif_psc_exp_multisynapse.py b/python/test/test_aeif_psc_exp_multisynapse.py index 29a03bd27..b123e648c 100644 --- a/python/test/test_aeif_psc_exp_multisynapse.py +++ b/python/test/test_aeif_psc_exp_multisynapse.py @@ -1,10 +1,21 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('aeif_psc_exp_multisynapse', 1, 2) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, "E_L":-70.6, \ - "g_L":300.0, "tau_syn":[40.0, 20.0]}) +neuron = ngpu.Create("aeif_psc_exp_multisynapse", 1, 2) +ngpu.SetStatus( + neuron, + { + "V_peak": 0.0, + "a": 4.0, + "b": 80.5, + "E_L": -70.6, + "g_L": 300.0, + "tau_syn": [40.0, 20.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -14,40 +25,40 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = 
nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_aeif_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_aeif_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_aeif_psc_exp_multisynapse_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_aeif_psc_exp_multisynapse_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_all.sh b/python/test/test_all.sh index fb94849b9..1873d693f 100755 --- a/python/test/test_all.sh +++ b/python/test/test_all.sh @@ -15,6 +15,6 @@ for fn in syn_group connect getarr setvar2 group_param; do python3 test_$fn.py 2>&1 | grep -v dyl > tmp diff -qs tmp logp3_$fn.txt 2>&1 >> log.txt res=$? - echo $fn : ${pass_str[$res]} + echo $fn : ${pass_str[$res]} done rm -f tmp diff --git a/python/test/test_brunel_array.py b/python/test/test_brunel_array.py index 2eea10825..b7e64cf16 100644 --- a/python/test/test_brunel_array.py +++ b/python/test/test_brunel_array.py @@ -1,36 +1,37 @@ -import sys import ctypes -import nestgpu as ngpu +import sys from random import randrange +import nestgpu as ngpu + if len(sys.argv) != 2: - print ("Usage: python %s n_neurons" % sys.argv[0]) + print("Usage: python %s n_neurons" % sys.argv[0]) quit() - -order = int(sys.argv[1])/5 + +order = int(sys.argv[1]) / 5 n_test = 1000 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -38,56 +39,49 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons - +exc_neuron = neuron[0:NE] # excitatory neurons 
+inh_neuron = neuron[NE:n_neurons] # inhibitory neurons + # receptor parameters E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 std_delay = 0.25 min_delay = 0.1 # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_delays = ngpu.RandomNormalClipped(CE*n_neurons, mean_delay, - std_delay, min_delay, - mean_delay+3*std_delay) +exc_delays = ngpu.RandomNormalClipped(CE * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay) -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"array":exc_delays}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = {"weight": Wex, "delay": {"array": exc_delays}, "receptor": 0} ngpu.Connect(exc_neuron, neuron, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_delays = ngpu.RandomNormalClipped(CI*n_neurons, mean_delay, - std_delay, min_delay, - mean_delay+3*std_delay) +inh_delays = ngpu.RandomNormalClipped(CI * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay) -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"array": inh_delays}, - "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = {"weight": Win, "delay": {"array": inh_delays}, "receptor": 1} ngpu.Connect(inh_neuron, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -101,24 +95,23 @@ data_list = ngpu.GetRecordData(record) row_sum = data_list[0] -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] import numpy as np + spike_arr = np.array(spike) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - 30.78) -max_diff = 3.0*np.sqrt(30.78)/np.sqrt(n_test) +max_diff = 3.0 * np.sqrt(30.78) / np.sqrt(n_test) std_spike_num = np.std(spike_arr) print(mean_spike_num) -print (diff, max_diff) +print(diff, max_diff) if diff < max_diff: sys.exit(0) else: sys.exit(1) - - diff --git a/python/test/test_brunel_list.py b/python/test/test_brunel_list.py index 01f1b1fa6..7eb037f37 100644 --- a/python/test/test_brunel_list.py +++ b/python/test/test_brunel_list.py @@ -1,8 +1,9 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange + +import nestgpu as ngpu import 
numpy as np order = 200 @@ -11,24 +12,24 @@ expected_rate = 30.78 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -37,8 +38,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -47,8 +48,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -57,36 +57,49 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, exc_neuron_list, inh_conn_dict, inh_syn_dict) ngpu.Connect(inh_neuron_list, inh_neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict 
= {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -99,53 +112,50 @@ data_list = ngpu.GetRecordData(record) for i in range(1000): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=NE+NI+1): - print("Expected number of in connections per neuron: ", NE+NI+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != NE + NI + 1: + print("Expected number of in connections per neuron: ", NE + NI + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: sys.exit(0) - diff --git a/python/test/test_brunel_mpi.py b/python/test/test_brunel_mpi.py index 97971994c..ec63051e8 100644 --- a/python/test/test_brunel_mpi.py +++ b/python/test/test_brunel_mpi.py @@ -1,16 +1,16 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange -import numpy as np +import nestgpu as ngpu +import numpy as np -ngpu.ConnectMpiInit(); +ngpu.ConnectMpiInit() mpi_np = ngpu.MpiNp() if mpi_np != 2: - print ("Usage: mpirun -np 2 python %s" % sys.argv[0]) + print("Usage: mpirun -np 2 python %s" % sys.argv[0]) quit() order = 100 @@ -21,24 +21,24 @@ mpi_id = ngpu.MpiId() print("Building on host ", mpi_id, " ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for 
GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -47,8 +47,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -57,8 +57,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -67,36 +66,49 @@ # Excitatory local connections, defined on all hosts # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and fixed indegree CE//2 -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE//2} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE // 2} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory local connections, defined on all hosts # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and fixed indegree CI//2 -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI//2} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI // 2} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, exc_neuron_list, inh_conn_dict, inh_syn_dict) ngpu.Connect(inh_neuron_list, inh_neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": 
poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -112,8 +124,8 @@ # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed indegree CE//2 # host 0 to host 1 -re_conn_dict={"rule": "fixed_indegree", "indegree": CE//2} -re_syn_dict=exc_syn_dict +re_conn_dict = {"rule": "fixed_indegree", "indegree": CE // 2} +re_syn_dict = exc_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, exc_neuron_list, 1, neuron, re_conn_dict, re_syn_dict) # host 1 to host 0 @@ -123,8 +135,8 @@ # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed indegree CI//2 # host 0 to host 1 -ri_conn_dict={"rule": "fixed_indegree", "indegree": CI//2} -ri_syn_dict=inh_syn_dict +ri_conn_dict = {"rule": "fixed_indegree", "indegree": CI // 2} +ri_syn_dict = inh_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, inh_neuron, 1, neuron, ri_conn_dict, ri_syn_dict) # host 1 to host 0 @@ -135,54 +147,51 @@ data_list = ngpu.GetRecordData(record) for i in range(500): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=2*(NE+NI)+1): - print("Expected number of in connections per neuron: ", 2*(NE+NI)+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != 2 * (NE + NI) + 1: + print("Expected number of in connections per neuron: ", 2 * (NE + NI) + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) 
if diff > max_diff: sys.exit(1) else: ngpu.MpiFinalize() sys.exit(0) - diff --git a/python/test/test_brunel_outdegree.py b/python/test/test_brunel_outdegree.py index d972acd7c..5e1b273f6 100644 --- a/python/test/test_brunel_outdegree.py +++ b/python/test/test_brunel_outdegree.py @@ -1,8 +1,9 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange + +import nestgpu as ngpu import numpy as np order = 200 @@ -11,23 +12,23 @@ expected_rate = 30.78 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CPN = 1000 # number of output connections per neuron +CPN = 1000 # number of output connections per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -36,8 +37,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -46,8 +47,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -56,34 +56,48 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CPN connections per neuron -exc_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CPN connections per neuron -inh_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, 
neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -96,53 +110,50 @@ data_list = ngpu.GetRecordData(record) for i in range(1000): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=NE+NI+1): - print("Expected number of in connections per neuron: ", NE+NI+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != NE + NI + 1: + print("Expected number of in connections per neuron: ", NE + NI + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: sys.exit(0) - diff --git a/python/test/test_brunel_outdegree_mpi.py b/python/test/test_brunel_outdegree_mpi.py index cde3c3d76..7533c79d4 100644 --- a/python/test/test_brunel_outdegree_mpi.py +++ b/python/test/test_brunel_outdegree_mpi.py @@ -1,16 +1,16 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange -import numpy as np +import nestgpu as ngpu +import numpy as np -ngpu.ConnectMpiInit(); 
+ngpu.ConnectMpiInit() mpi_np = ngpu.MpiNp() if mpi_np != 2: - print ("Usage: mpirun -np 2 python %s" % sys.argv[0]) + print("Usage: mpirun -np 2 python %s" % sys.argv[0]) quit() order = 100 @@ -21,23 +21,23 @@ mpi_id = ngpu.MpiId() print("Building on host ", mpi_id, " ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CPN = 500 # number of output connections per neuron +CPN = 500 # number of output connections per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -46,8 +46,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -56,8 +56,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -66,34 +65,48 @@ # Excitatory local connections, defined on all hosts # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and fixed outdegree CPN -exc_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory local connections, defined on all hosts # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and fixed outdegree CPN -inh_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson 
generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -109,8 +122,8 @@ # connect excitatory neurons to port 0 of all neurons # weight Wex and fixed outdegree CPN # host 0 to host 1 -re_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -re_syn_dict=exc_syn_dict +re_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +re_syn_dict = exc_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, exc_neuron_list, 1, neuron, re_conn_dict, re_syn_dict) # host 1 to host 0 @@ -120,8 +133,8 @@ # connect inhibitory neurons to port 1 of all neurons # weight Win and fixed outdegree CPN # host 0 to host 1 -ri_conn_dict={"rule": "fixed_outdegree", "outdegree": CPN} -ri_syn_dict=inh_syn_dict +ri_conn_dict = {"rule": "fixed_outdegree", "outdegree": CPN} +ri_syn_dict = inh_syn_dict # host 0 to host 1 ngpu.RemoteConnect(0, inh_neuron, 1, neuron, ri_conn_dict, ri_syn_dict) # host 1 to host 0 @@ -132,54 +145,51 @@ data_list = ngpu.GetRecordData(record) for i in range(500): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=2*(NE+NI)+1): - print("Expected number of in connections per neuron: ", 2*(NE+NI)+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != 2 * (NE + NI) + 1: + print("Expected number of in connections per neuron: ", 2 * (NE + NI) + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * 
np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: ngpu.MpiFinalize() sys.exit(0) - diff --git a/python/test/test_brunel_user_m1.py b/python/test/test_brunel_user_m1.py index ae8c6390c..4d4890e5c 100644 --- a/python/test/test_brunel_user_m1.py +++ b/python/test/test_brunel_user_m1.py @@ -1,8 +1,9 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange + +import nestgpu as ngpu import numpy as np order = 200 @@ -11,24 +12,24 @@ expected_rate = 30.78 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CE = 800 # number of excitatory synapses per neuron -CI = CE//4 # number of inhibitory synapses per neuron +CE = 800 # number of excitatory synapses per neuron +CI = CE // 4 # number of inhibitory synapses per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -37,8 +38,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("user_m1", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -47,8 +48,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -57,36 +57,49 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CE connections per neuron -exc_conn_dict={"rule": "fixed_indegree", "indegree": CE} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_indegree", "indegree": CE} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CI connections per neuron -inh_conn_dict={"rule": "fixed_indegree", "indegree": CI} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_indegree", "indegree": CI} +inh_syn_dict = { + "weight": Win, + "delay": { + 
"distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, exc_neuron_list, inh_conn_dict, inh_syn_dict) ngpu.Connect(inh_neuron_list, inh_neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, - "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -99,53 +112,50 @@ data_list = ngpu.GetRecordData(record) for i in range(1000): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) - if (n_out_conn!=NE+NI): - print("Expected number of out connections per neuron: ", NE+NI) - print("Number of out connections of neuron ", i + 1, ": ", \ - n_out_conn) + if n_out_conn != NE + NI: + print("Expected number of out connections per neuron: ", NE + NI) + print("Number of out connections of neuron ", i + 1, ": ", n_out_conn) sys.exit(1) - + for i in range(10): i_target = randrange(n_neurons) - conn_id = ngpu.GetConnections(target=i_target+1) + conn_id = ngpu.GetConnections(target=i_target + 1) n_in_conn = len(conn_id) - if (n_in_conn!=NE+NI+1): - print("Expected number of in connections per neuron: ", NE+NI+1) - print("Number of in connections of neuron ", i_target, ": ", \ - n_in_conn) + if n_in_conn != NE + NI + 1: + print("Expected number of in connections per neuron: ", NE + NI + 1) + print("Number of in connections of neuron ", i_target, ": ", n_in_conn) sys.exit(1) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: sys.exit(0) - diff --git a/python/test/test_connect.py b/python/test/test_connect.py index e24072fe3..c41e0666b 100644 --- a/python/test/test_connect.py +++ b/python/test/test_connect.py @@ -1,42 +1,45 @@ import 
ctypes + import nestgpu as ngpu N = 5 -neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2*N) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2 * N) neuron_even = [] neuron_odd = [] for i in range(N): - neuron_even.append(neuron[2*i]) - neuron_odd.append(neuron[2*i+1]) + neuron_even.append(neuron[2 * i]) + neuron_odd.append(neuron[2 * i + 1]) even_to_odd_delay = [] even_to_odd_weight = [] odd_to_even_delay = [] odd_to_even_weight = [] for itgt in range(N): - ite = 2*itgt - ito = 2*itgt + 1 + ite = 2 * itgt + ito = 2 * itgt + 1 for isrc in range(N): - ise = 2*isrc - iso = 2*isrc + 1 - even_to_odd_delay.append(2.0*N*ise + ito) - even_to_odd_weight.append(100.0*(2.0*N*ise + ito)) - odd_to_even_delay.append(2.0*N*iso + ite) - odd_to_even_weight.append(100.0*(2.0*N*iso + ite)) + ise = 2 * isrc + iso = 2 * isrc + 1 + even_to_odd_delay.append(2.0 * N * ise + ito) + even_to_odd_weight.append(100.0 * (2.0 * N * ise + ito)) + odd_to_even_delay.append(2.0 * N * iso + ite) + odd_to_even_weight.append(100.0 * (2.0 * N * iso + ite)) + + +conn_dict = {"rule": "all_to_all"} +even_to_odd_syn_dict = { + "weight_array": even_to_odd_weight, + "delay_array": even_to_odd_delay, +} +odd_to_even_syn_dict = { + "weight_array": odd_to_even_weight, + "delay_array": odd_to_even_delay, +} -conn_dict={"rule": "all_to_all"} -even_to_odd_syn_dict={ - "weight_array":even_to_odd_weight, - "delay_array":even_to_odd_delay} - -odd_to_even_syn_dict={ - "weight_array":odd_to_even_weight, - "delay_array":odd_to_even_delay} - -ngpu.Connect(neuron_even, neuron_odd, conn_dict, even_to_odd_syn_dict); -ngpu.Connect(neuron_odd, neuron_even, conn_dict, odd_to_even_syn_dict); +ngpu.Connect(neuron_even, neuron_odd, conn_dict, even_to_odd_syn_dict) +ngpu.Connect(neuron_odd, neuron_even, conn_dict, odd_to_even_syn_dict) # Even to all conn_id = ngpu.GetConnections(neuron_even, neuron) @@ -44,7 +47,7 @@ print("########################################") print("Even to all") for i in range(len(conn_status_dict)): - print (conn_status_dict[i]) + print(conn_status_dict[i]) print() print() @@ -53,7 +56,7 @@ print("########################################") print("Even to all weight, delat") for i in range(len(conn_status_dict)): - print (conn_status_dict[i]) + print(conn_status_dict[i]) print() print() @@ -62,7 +65,7 @@ print("########################################") print("All to odd") for i in range(len(conn_status_dict)): - print (conn_status_dict[i]) + print(conn_status_dict[i]) print() print() @@ -73,19 +76,17 @@ print("########################################") print("Even to 3,4,5,6") for i in range(len(conn_status_dict)): - print (conn_status_dict[i]) + print(conn_status_dict[i]) print() print() - + # 3,4,5,6 to odd conn_id = ngpu.GetConnections(neuron_3_6, neuron_odd) conn_status_dict = ngpu.GetStatus(conn_id) print("########################################") print("3,4,5,6 to odd") for i in range(len(conn_status_dict)): - print (conn_status_dict[i]) + print(conn_status_dict[i]) print() print() - - diff --git a/python/test/test_distribution.py b/python/test/test_distribution.py index f7a8f6204..cbe889915 100644 --- a/python/test/test_distribution.py +++ b/python/test/test_distribution.py @@ -1,27 +1,35 @@ import nestgpu as ngpu import numpy as np -neuron = ngpu.Create("aeif_cond_beta_multisynapse",100000) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 100000) -ngpu.SetStatus(neuron, {"V_m": {"distribution":"normal_clipped", - "mu":-70.0, "low":-90.0, - "high":-50.0, - "sigma":5.0}}) +ngpu.SetStatus( + neuron, + { + "V_m": 
{ + "distribution": "normal_clipped", + "mu": -70.0, + "low": -90.0, + "high": -50.0, + "sigma": 5.0, + } + }, +) l = ngpu.GetStatus(neuron, "V_m") -d=[] +d = [] for elem in l: d.append(elem[0]) - -print (len(d)) + +print(len(d)) import matplotlib.pyplot as plt # An "interface" to matplotlib.axes.Axes.hist() method -n, bins, patches = plt.hist(d, bins='auto', color='#0504aa', alpha=0.7, rwidth=0.85) -plt.grid(axis='y', alpha=0.75) -plt.xlabel('Value') -plt.ylabel('Frequency') -plt.title('V_m Histogram') -plt.text(23, 45, r'$\mu=15, b=3$') +n, bins, patches = plt.hist(d, bins="auto", color="#0504aa", alpha=0.7, rwidth=0.85) +plt.grid(axis="y", alpha=0.75) +plt.xlabel("Value") +plt.ylabel("Frequency") +plt.title("V_m Histogram") +plt.text(23, 45, r"$\mu=15, b=3$") maxfreq = n.max() # Set a clean upper y-axis limit. plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10) diff --git a/python/test/test_ext_neuron.py b/python/test/test_ext_neuron.py index 13e26a608..67b9fac80 100644 --- a/python/test/test_ext_neuron.py +++ b/python/test/test_ext_neuron.py @@ -1,8 +1,10 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 -neuron = ngpu.Create('ext_neuron', 1, 3) +neuron = ngpu.Create("ext_neuron", 1, 3) spike = ngpu.Create("spike_generator") spike_times = [50.0, 100.0, 400.0, 600.0] n_spikes = 4 @@ -12,24 +14,23 @@ delay = [1.0, 50.0, 100.0] weight = [0.1, 0.2, 0.5] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = {"receptor": syn, "weight": weight[syn], "delay": delay[syn]} ngpu.Connect(spike, neuron, conn_spec, syn_spec) i_neuron_arr = [neuron[0], neuron[0], neuron[0]] i_receptor_arr = [0, 1, 2] var_name_arr = ["port_value", "port_value", "port_value"] -record = ngpu.CreateRecord("", var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord("", var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -val1=[row[1] for row in data_list] -val2=[row[2] for row in data_list] -val3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +val1 = [row[1] for row in data_list] +val2 = [row[2] for row in data_list] +val3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/test/test_fixed_total_number.py b/python/test/test_fixed_total_number.py index 9ddddb9a9..cff2e2e64 100644 --- a/python/test/test_fixed_total_number.py +++ b/python/test/test_fixed_total_number.py @@ -1,8 +1,9 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange + +import nestgpu as ngpu import numpy as np order = 200 @@ -11,23 +12,23 @@ expected_rate = 30.78 print("Building ...") -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_receptors = 2 -NE = 4 * order # number of excitatory neurons -NI = 1 * order # number of inhibitory neurons +NE = 4 * order # number of excitatory neurons +NI = 1 * order # number of inhibitory neurons n_neurons = NE + NI # number of neurons in total -CPN = 1000 # number of connections per neuron +CPN = 1000 # number of connections per neuron Wex = 0.05 Win = 0.35 # poisson generator parameters -poiss_rate = 20000.0 # poisson signal rate in Hz +poiss_rate = 20000.0 # poisson signal rate in Hz poiss_weight = 0.37 -poiss_delay = 0.2 # poisson signal 
delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -36,8 +37,8 @@ # Create n_neurons neurons with n_receptor receptor ports neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, n_receptors) -exc_neuron = neuron[0:NE] # excitatory neurons -inh_neuron = neuron[NE:n_neurons] # inhibitory neurons +exc_neuron = neuron[0:NE] # excitatory neurons +inh_neuron = neuron[NE:n_neurons] # inhibitory neurons neuron_list = neuron.ToList() exc_neuron_list = exc_neuron.ToList() inh_neuron_list = inh_neuron.ToList() @@ -46,8 +47,7 @@ E_rev = [0.0, -85.0] tau_decay = [1.0, 1.0] tau_rise = [1.0, 1.0] -ngpu.SetStatus(neuron, {"E_rev":E_rev, "tau_decay":tau_decay, - "tau_rise":tau_rise}) +ngpu.SetStatus(neuron, {"E_rev": E_rev, "tau_decay": tau_decay, "tau_rise": tau_rise}) mean_delay = 0.5 @@ -56,34 +56,48 @@ # Excitatory connections # connect excitatory neurons to port 0 of all neurons # normally distributed delays, weight Wex and CPN connections per neuron -exc_conn_dict={"rule": "fixed_total_number", "total_num": CPN*NE} -exc_syn_dict={"weight": Wex, "delay": {"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":0} +exc_conn_dict = {"rule": "fixed_total_number", "total_num": CPN * NE} +exc_syn_dict = { + "weight": Wex, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 0, +} ngpu.Connect(exc_neuron, neuron_list, exc_conn_dict, exc_syn_dict) # Inhibitory connections # connect inhibitory neurons to port 1 of all neurons # normally distributed delays, weight Win and CPN connections per neuron -inh_conn_dict={"rule": "fixed_total_number", "total_num": CPN*NI} -inh_syn_dict={"weight": Win, "delay":{"distribution":"normal_clipped", - "mu":mean_delay, "low":min_delay, - "high":mean_delay+3*std_delay, - "sigma":std_delay}, "receptor":1} +inh_conn_dict = {"rule": "fixed_total_number", "total_num": CPN * NI} +inh_syn_dict = { + "weight": Win, + "delay": { + "distribution": "normal_clipped", + "mu": mean_delay, + "low": min_delay, + "high": mean_delay + 3 * std_delay, + "sigma": std_delay, + }, + "receptor": 1, +} ngpu.Connect(inh_neuron_list, neuron, inh_conn_dict, inh_syn_dict) -#connect poisson generator to port 0 of all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay, "receptor":0} +# connect poisson generator to port 0 of all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay, "receptor": 0} ngpu.Connect(pg_list, neuron_list, pg_conn_dict, pg_syn_dict) -i_neuron_list = [neuron[0], neuron[n_neurons-1]] +i_neuron_list = [neuron[0], neuron[n_neurons - 1]] i_receptor_list = [0, 0] var_name_list = ["spike", "spike"] - -for i in range(n_test-2): + +for i in range(n_test - 2): i_neuron_list.append(neuron[randrange(n_neurons)]) i_receptor_list.append(0) var_name_list.append("spike") @@ -97,42 +111,41 @@ n_conn_tot = 0 for i in range(1000): - conn_id = ngpu.GetConnections(i+1) + conn_id = ngpu.GetConnections(i + 1) n_out_conn = len(conn_id) n_conn_tot = n_conn_tot + n_out_conn -if (n_conn_tot!=(NE+NI)*CPN): - print("Expected total number of connections: ", (NE+NI)*CPN) +if n_conn_tot != (NE + NI) * CPN: + print("Expected total number of connections: ", (NE + NI) * CPN) print("Total number of connections ", n_conn_tot) sys.exit(1) 
- + row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] -spike = row_sum[1:len(row_sum)] +spike = row_sum[1 : len(row_sum)] spike_arr = np.array(spike) min_spike_num = np.min(spike_arr) max_spike_num = np.max(spike_arr) -if (min_spike_num < expected_rate - 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) +if min_spike_num < expected_rate - 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Min rate :", min_spike_num) sys.exit(1) - -if (max_spike_num > expected_rate + 3.0*math.sqrt(expected_rate)): - print ("Expected rate: ", expected_rate) + +if max_spike_num > expected_rate + 3.0 * math.sqrt(expected_rate): + print("Expected rate: ", expected_rate) print("Max rate :", max_spike_num) sys.exit(1) mean_spike_num = np.mean(spike_arr) diff = abs(mean_spike_num - expected_rate) -max_diff = 3.0*np.sqrt(expected_rate)/np.sqrt(n_test) -print ("Expected rate: ", expected_rate) +max_diff = 3.0 * np.sqrt(expected_rate) / np.sqrt(n_test) +print("Expected rate: ", expected_rate) print("Mean rate: ", mean_spike_num) if diff > max_diff: sys.exit(1) else: sys.exit(0) - diff --git a/python/test/test_getarr.py b/python/test/test_getarr.py index a8c9fb180..a529da516 100644 --- a/python/test/test_getarr.py +++ b/python/test/test_getarr.py @@ -1,4 +1,5 @@ import sys + import nestgpu as ngpu spike = ngpu.Create("spike_generator", 4) @@ -15,14 +16,11 @@ # set spike times and heights -ngpu.SetStatus(spike0, {"spike_times": spike_time0, - "spike_heights":spike_height0}) +ngpu.SetStatus(spike0, {"spike_times": spike_time0, "spike_heights": spike_height0}) -ngpu.SetStatus(spike1, {"spike_times": spike_time1, - "spike_heights":spike_height1}) +ngpu.SetStatus(spike1, {"spike_times": spike_time1, "spike_heights": spike_height1}) -ngpu.SetStatus(spike2, {"spike_times": spike_time2, - "spike_heights":spike_height2}) +ngpu.SetStatus(spike2, {"spike_times": spike_time2, "spike_heights": spike_height2}) print(ngpu.GetStatus(spike0, "spike_times")) print(ngpu.GetStatus(spike0, "spike_heights")) @@ -44,7 +42,3 @@ print() print() print(ngpu.GetStatus(neuron_list)) - - - - diff --git a/python/test/test_group_param.py b/python/test/test_group_param.py index 5d3e68a09..ba54fbdeb 100644 --- a/python/test/test_group_param.py +++ b/python/test/test_group_param.py @@ -1,11 +1,12 @@ import nestgpu as ngpu -n=ngpu.Create('iaf_psc_exp_g',3) + +n = ngpu.Create("iaf_psc_exp_g", 3) print(ngpu.GetStatus(n)) print(ngpu.GetStatus(n, "V_m_rel")) print(ngpu.GetStatus(n, "C_m")) print(ngpu.GetStatus(n, "tau_m")) print(ngpu.GetStatus(n, "I_syn")) -ngpu.SetStatus(n, {"C_m":120.0, "V_m_rel":17.0, "tau_m":7.0, "I_syn":600.0}) +ngpu.SetStatus(n, {"C_m": 120.0, "V_m_rel": 17.0, "tau_m": 7.0, "I_syn": 600.0}) print(ngpu.GetStatus(n)) print(ngpu.GetStatus(n, "V_m_rel")) print(ngpu.GetStatus(n, "C_m")) diff --git a/python/test/test_iaf_psc_alpha.py b/python/test/test_iaf_psc_alpha.py index 68067d5ce..6bfcf77b0 100644 --- a/python/test/test_iaf_psc_alpha.py +++ b/python/test/test_iaf_psc_alpha.py @@ -1,9 +1,11 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.00005 -neuron = ngpu.Create('iaf_psc_alpha') +neuron = ngpu.Create("iaf_psc_alpha") spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -16,43 +18,42 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} 
-syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[-70.0+row[1] for row in data_list] - +t = [row[0] for row in data_list] +V_m = [-70.0 + row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_alpha_nest.txt', 'w') as f: +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_alpha_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('test_iaf_psc_alpha_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_iaf_psc_alpha_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+10]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 10] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_iaf_psc_exp.py b/python/test/test_iaf_psc_exp.py index a6044aa16..331248da7 100644 --- a/python/test/test_iaf_psc_exp.py +++ b/python/test/test_iaf_psc_exp.py @@ -1,8 +1,10 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.00005 -neuron = ngpu.Create('iaf_psc_exp') +neuron = ngpu.Create("iaf_psc_exp") spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] n_spikes = 2 @@ -12,39 +14,39 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'receptor':0, 'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'receptor':1, 'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"receptor": 0, "weight": weight[0], "delay": delay[0]} +syn_spec_in = {"receptor": 1, "weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[-70.0+row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [-70.0 + row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = 
np.loadtxt('test_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -print (len(t)) -print (len(t1)) +data = np.loadtxt("test_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +print(len(t)) +print(len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_iaf_psc_exp_g.py b/python/test/test_iaf_psc_exp_g.py index c9067284d..c08f74f07 100644 --- a/python/test/test_iaf_psc_exp_g.py +++ b/python/test/test_iaf_psc_exp_g.py @@ -1,12 +1,14 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 E_L = -65.0 ngpu.SetKernelStatus("verbosity_level", 0) -neuron = ngpu.Create('iaf_psc_exp_g', 1) +neuron = ngpu.Create("iaf_psc_exp_g", 1) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] @@ -17,40 +19,40 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1]+E_L for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] + E_L for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_fast_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_fast_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_iaf_psc_exp_hc.py b/python/test/test_iaf_psc_exp_hc.py index b2f8f8b22..2a329116f 100644 --- a/python/test/test_iaf_psc_exp_hc.py +++ b/python/test/test_iaf_psc_exp_hc.py @@ -1,12 +1,14 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.0005 E_L = -65.0 ngpu.SetKernelStatus("verbosity_level", 0) -neuron = ngpu.Create('iaf_psc_exp_hc', 1) +neuron = ngpu.Create("iaf_psc_exp_hc", 1) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] @@ -17,40 +19,40 @@ delay = [1.0, 100.0] weight = [1.0, -2.0] -conn_spec={"rule": 
"all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m_rel"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1]+E_L for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] + E_L for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = np.loadtxt('../test/test_fast_iaf_psc_exp_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_fast_iaf_psc_exp_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) diff --git a/python/test/test_izh.py b/python/test/test_izh.py index ab397d6e8..f999baebe 100644 --- a/python/test/test_izh.py +++ b/python/test/test_izh.py @@ -1,9 +1,11 @@ import sys + import nestgpu as ngpu import numpy as np + tolerance = 0.005 -neuron = ngpu.Create('izhikevich', 1) -#ngpu.SetStatus(neuron, {"tau_syn": 1.0e-6}) +neuron = ngpu.Create("izhikevich", 1) +# ngpu.SetStatus(neuron, {"tau_syn": 1.0e-6}) spike = ngpu.Create("spike_generator") spike_times = [10.0, 40.0] n_spikes = 2 @@ -13,52 +15,52 @@ delay = [1.0, 10.0] weight = [1.0, -2.0] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} -syn_spec_ex={'weight': weight[0], 'delay': delay[0]} -syn_spec_in={'weight': weight[1], 'delay': delay[1]} +syn_spec_ex = {"weight": weight[0], "delay": delay[0]} +syn_spec_in = {"weight": weight[1], "delay": delay[1]} ngpu.Connect(spike, neuron, conn_spec, syn_spec_ex) ngpu.Connect(spike, neuron, conn_spec, syn_spec_in) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) -#voltmeter = nest.Create('voltmeter') -#nest.Connect(voltmeter, neuron) +# voltmeter = nest.Create('voltmeter') +# nest.Connect(voltmeter, neuron) ngpu.Simulate(80.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] -#dmm = nest.GetStatus(voltmeter)[0] -#V_m = dmm["events"]["V_m"] -#t = dmm["events"]["times"] -#with open('test_iaf_psc_exp_nest.txt', 'w') as f: +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] +# dmm = nest.GetStatus(voltmeter)[0] +# V_m = dmm["events"]["V_m"] +# t = dmm["events"]["times"] +# with open('test_iaf_psc_exp_nest.txt', 'w') as f: # for i in range(len(t)): # f.write("%s\t%s\n" % (t[i], V_m[i])) -data = 
np.loadtxt('../test/test_izh_nest.txt', delimiter="\t") -t1=[x[0] for x in data ] -V_m1=[x[1] for x in data ] -#print (len(t)) -#print (len(t1)) +data = np.loadtxt("../test/test_izh_nest.txt", delimiter="\t") +t1 = [x[0] for x in data] +V_m1 = [x[1] for x in data] +# print (len(t)) +# print (len(t1)) -dV=[V_m[i*10+20]-V_m1[i] for i in range(len(t1))] -rmse =np.std(dV)/abs(np.mean(V_m)) +dV = [V_m[i * 10 + 20] - V_m1[i] for i in range(len(t1))] +rmse = np.std(dV) / abs(np.mean(V_m)) print("rmse : ", rmse, " tolerance: ", tolerance) -if rmse>tolerance: +if rmse > tolerance: sys.exit(1) sys.exit(0) -#import matplotlib.pyplot as plt +# import matplotlib.pyplot as plt -#fig1 = plt.figure(1) -#plt.plot(t, V_m) -#fig1.suptitle("NEST GPU") -#fig2 = plt.figure(2) -#plt.plot(t1, V_m1) -#fig2.suptitle("NEST") -#plt.draw() -#plt.pause(1) -#ngpu.waitenter("<Hit Enter To Close>") -#plt.close() +# fig1 = plt.figure(1) +# plt.plot(t, V_m) +# fig1.suptitle("NEST GPU") +# fig2 = plt.figure(2) +# plt.plot(t1, V_m1) +# fig2.suptitle("NEST") +# plt.draw() +# plt.pause(1) +# ngpu.waitenter("<Hit Enter To Close>") +# plt.close() diff --git a/python/test/test_setvar.py b/python/test/test_setvar.py index 89797a130..bf047dfc2 100644 --- a/python/test/test_setvar.py +++ b/python/test/test_setvar.py @@ -1,20 +1,20 @@ import sys + import nestgpu as ngpu n_neurons = 3 # create n_neurons neurons with 2 receptor ports -neuron = ngpu.Create('aeif_cond_beta_multisynapse', n_neurons, 2) -ngpu.SetStatus(neuron, {'tau_decay':[60.0, 10.0], - 'tau_rise':[40.0, 5.0]}) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, 2) +ngpu.SetStatus(neuron, {"tau_decay": [60.0, 10.0], "tau_rise": [40.0, 5.0]}) neuron0 = neuron[0:0] neuron1 = neuron[1:1] neuron2 = neuron[2:2] - -ngpu.SetStatus(neuron0, {'V_m':-80.0}) -ngpu.SetStatus(neuron1, {'g1':[0.0, 0.1]}) -ngpu.SetStatus(neuron2, {'g1':[0.1, 0.0]}) + +ngpu.SetStatus(neuron0, {"V_m": -80.0}) +ngpu.SetStatus(neuron1, {"g1": [0.0, 0.1]}) +ngpu.SetStatus(neuron2, {"g1": [0.1, 0.0]}) # reading parameters and variables test @@ -48,16 +48,15 @@ i_receptor_arr = [0, 0, 0] # create multimeter record of V_m var_name_arr = ["V_m", "V_m", "V_m"] -record = ngpu.CreateRecord("", var_name_arr, i_neuron_arr, - i_receptor_arr) +record = ngpu.CreateRecord("", var_name_arr, i_neuron_arr, i_receptor_arr) ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V1=[row[1] for row in data_list] -V2=[row[2] for row in data_list] -V3=[row[3] for row in data_list] +t = [row[0] for row in data_list] +V1 = [row[1] for row in data_list] +V2 = [row[2] for row in data_list] +V3 = [row[3] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/test/test_setvar2.py b/python/test/test_setvar2.py index 3acd4e838..a67f1031b 100644 --- a/python/test/test_setvar2.py +++ b/python/test/test_setvar2.py @@ -1,25 +1,24 @@ import sys + import nestgpu as ngpu n_neurons = 6 # create n_neurons neurons with 2 receptor ports -neuron = ngpu.Create('aeif_cond_beta_multisynapse', n_neurons, 2) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, 2) neuron_even = [neuron[0], neuron[2], neuron[4]] neuron_odd = [neuron[3], neuron[5], neuron[1]] -ngpu.SetStatus(neuron_even, {'tau_decay':[80.0, 40.0], - 'tau_rise':[60.0, 20.0]}) -ngpu.SetStatus(neuron_odd, {'tau_decay':[70.0, 30.0], - 'tau_rise':[50.0, 10.0]}) +ngpu.SetStatus(neuron_even, {"tau_decay": [80.0, 40.0], "tau_rise": [60.0, 20.0]}) +ngpu.SetStatus(neuron_odd, {"tau_decay": [70.0, 30.0], "tau_rise": 
[50.0, 10.0]}) -ngpu.SetStatus(neuron_even, {'V_m':-80.0}) -ngpu.SetStatus(neuron_odd, {'V_m':-90.0}) +ngpu.SetStatus(neuron_even, {"V_m": -80.0}) +ngpu.SetStatus(neuron_odd, {"V_m": -90.0}) -ngpu.SetStatus(neuron_even, {'g1':[0.4, 0.2]}) -ngpu.SetStatus(neuron_odd, {'g1':[0.3, 0.1]}) +ngpu.SetStatus(neuron_even, {"g1": [0.4, 0.2]}) +ngpu.SetStatus(neuron_odd, {"g1": [0.3, 0.1]}) -ngpu.SetStatus(neuron_even, {'V_th':-40.0}) -ngpu.SetStatus(neuron_odd, {'V_th':-30.0}) +ngpu.SetStatus(neuron_even, {"V_th": -40.0}) +ngpu.SetStatus(neuron_odd, {"V_th": -30.0}) # reading parameters and variables test read_td = ngpu.GetNeuronStatus(neuron, "tau_decay") @@ -35,8 +34,7 @@ print("read_g1", read_g1) # reading parameters and variables from neuron list test -neuron_list = [neuron[0], neuron[2], neuron[4], neuron[1], neuron[3], - neuron[5]] +neuron_list = [neuron[0], neuron[2], neuron[4], neuron[1], neuron[3], neuron[5]] read1_td = ngpu.GetNeuronStatus(neuron_list, "tau_decay") read1_tr = ngpu.GetNeuronStatus(neuron_list, "tau_rise") read1_Vm = ngpu.GetNeuronStatus(neuron_list, "V_m") @@ -48,4 +46,3 @@ print("read1_Vm", read1_Vm) print("read1_Vth", read1_Vth) print("read1_g1", read1_g1) - diff --git a/python/test/test_setvar3.py b/python/test/test_setvar3.py index 3ea97a73b..781b78dae 100644 --- a/python/test/test_setvar3.py +++ b/python/test/test_setvar3.py @@ -1,25 +1,24 @@ import sys + import nestgpu as ngpu n_neurons = 6 # create n_neurons neurons with 2 receptor ports -neuron = ngpu.Create('aeif_cond_beta_multisynapse', n_neurons, 2) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", n_neurons, 2) neuron_even = [neuron[0], neuron[2], neuron[4]] neuron_odd = [neuron[3], neuron[5], neuron[1]] -ngpu.SetStatus(neuron_even, {'tau_decay':[80.0, 40.0], - 'tau_rise':[60.0, 20.0]}) -ngpu.SetStatus(neuron_odd, {'tau_decay':[70.0, 30.0], - 'tau_rise':[50.0, 10.0]}) +ngpu.SetStatus(neuron_even, {"tau_decay": [80.0, 40.0], "tau_rise": [60.0, 20.0]}) +ngpu.SetStatus(neuron_odd, {"tau_decay": [70.0, 30.0], "tau_rise": [50.0, 10.0]}) -ngpu.SetStatus(neuron_even, {'V_m':-80.0}) -ngpu.SetStatus(neuron_odd, {'V_m':-90.0}) +ngpu.SetStatus(neuron_even, {"V_m": -80.0}) +ngpu.SetStatus(neuron_odd, {"V_m": -90.0}) -ngpu.SetStatus(neuron_even, {'g1':[0.4, 0.2]}) -ngpu.SetStatus(neuron_odd, {'g1':[0.3, 0.1]}) +ngpu.SetStatus(neuron_even, {"g1": [0.4, 0.2]}) +ngpu.SetStatus(neuron_odd, {"g1": [0.3, 0.1]}) -ngpu.SetStatus(neuron_even, {'V_th':-40.0}) -ngpu.SetStatus(neuron_odd, {'V_th':-30.0}) +ngpu.SetStatus(neuron_even, {"V_th": -40.0}) +ngpu.SetStatus(neuron_odd, {"V_th": -30.0}) # reading parameters and variables test read_td = ngpu.GetNeuronStatus(neuron, "tau_decay") @@ -35,8 +34,7 @@ print("read_g1", read_g1) # reading parameters and variables from neuron list test -neuron_list = [neuron[0], neuron[2], neuron[4], neuron[1], neuron[3], - neuron[5]] +neuron_list = [neuron[0], neuron[2], neuron[4], neuron[1], neuron[3], neuron[5]] read1_td = ngpu.GetNeuronStatus(neuron_list, "tau_decay") read1_tr = ngpu.GetNeuronStatus(neuron_list, "tau_rise") read1_Vm = ngpu.GetNeuronStatus(neuron_list, "V_m") @@ -82,5 +80,3 @@ print() print(ngpu.GetStatus(neuron_odd, "V_m")) - - diff --git a/python/test/test_spike_detector.py b/python/test/test_spike_detector.py index 4c204537b..0e303c637 100644 --- a/python/test/test_spike_detector.py +++ b/python/test/test_spike_detector.py @@ -1,19 +1,20 @@ import sys + import nestgpu as ngpu tolerance = 1.0e-6 neuron = ngpu.Create("aeif_cond_beta_multisynapse", 3) -ngpu.SetStatus(neuron, 
{"I_e":1000.0}) +ngpu.SetStatus(neuron, {"I_e": 1000.0}) spike_det = ngpu.Create("spike_detector") -conn_dict={"rule": "one_to_one"} -syn_dict1={"weight": 1.0, "delay": 10.0, "receptor":0} -syn_dict2={"weight": 2.0, "delay": 20.0, "receptor":0} -syn_dict3={"weight": 3.0, "delay": 30.0, "receptor":0} +conn_dict = {"rule": "one_to_one"} +syn_dict1 = {"weight": 1.0, "delay": 10.0, "receptor": 0} +syn_dict2 = {"weight": 2.0, "delay": 20.0, "receptor": 0} +syn_dict3 = {"weight": 3.0, "delay": 30.0, "receptor": 0} ngpu.Connect([neuron[0]], spike_det, conn_dict, syn_dict1) @@ -28,38 +29,38 @@ ngpu.Simulate() data_n = ngpu.GetRecordData(record_n) -t_n=[row[0] for row in data_n] -spike_n=[row[1] for row in data_n] +t_n = [row[0] for row in data_n] +spike_n = [row[1] for row in data_n] data_sd = ngpu.GetRecordData(record_sd) -t_sd=[row[0] for row in data_sd] -spike_sd=[row[1] for row in data_sd] +t_sd = [row[0] for row in data_sd] +spike_sd = [row[1] for row in data_sd] -for i in range(len(t_n)-400): - if spike_n[i]>0.5: +for i in range(len(t_n) - 400): + if spike_n[i] > 0.5: j1 = i + 101 j2 = i + 201 j3 = i + 301 - if abs(spike_sd[j1] - 1.0)>tolerance: + if abs(spike_sd[j1] - 1.0) > tolerance: print("Expected spike height: 1.0, simulated: ", spike_sd[j1]) sys.exit(1) - if abs(spike_sd[j2] - 2.0)>tolerance: + if abs(spike_sd[j2] - 2.0) > tolerance: print("Expected spike height: 2.0, simulated: ", spike_sd[j2]) sys.exit(1) - if abs(spike_sd[j3] - 3.0)>tolerance: + if abs(spike_sd[j3] - 3.0) > tolerance: print("Expected spike height: 3.0, simulated: ", spike_sd[j3]) sys.exit(1) - -#import matplotlib.pyplot as plt -#plt.figure(1) -#plt.plot(t_n, spike_n) +# import matplotlib.pyplot as plt + +# plt.figure(1) +# plt.plot(t_n, spike_n) -#plt.figure(2) -#plt.plot(t_sd, spike_sd) +# plt.figure(2) +# plt.plot(t_sd, spike_sd) -#plt.draw() -#plt.pause(1) -#raw_input("<Hit Enter To Close>") -#plt.close() +# plt.draw() +# plt.pause(1) +# raw_input("<Hit Enter To Close>") +# plt.close() sys.exit(0) diff --git a/python/test/test_spike_times.py b/python/test/test_spike_times.py index 92f2d420e..80e8bd79d 100644 --- a/python/test/test_spike_times.py +++ b/python/test/test_spike_times.py @@ -1,19 +1,20 @@ -import sys -import math import ctypes -import nestgpu as ngpu +import math +import sys from random import randrange + +import nestgpu as ngpu import numpy as np -ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers +ngpu.SetKernelStatus("rnd_seed", 1234) # seed for GPU random numbers n_neurons = 30 eps = 1.0e-6 # poisson generator parameters -poiss_rate = 500.0 # poisson signal rate in Hz +poiss_rate = 500.0 # poisson signal rate in Hz poiss_weight = 4.0 -poiss_delay = 0.2 # poisson signal delay in ms +poiss_delay = 0.2 # poisson signal delay in ms # create poisson generator pg = ngpu.Create("poisson_generator") @@ -28,25 +29,24 @@ # Create n_neurons spike detectors sd = ngpu.Create("spike_detector", n_neurons) -#connect poisson generator to all neurons -pg_conn_dict={"rule": "all_to_all"} -pg_syn_dict={"weight": poiss_weight, "delay": poiss_delay} +# connect poisson generator to all neurons +pg_conn_dict = {"rule": "all_to_all"} +pg_syn_dict = {"weight": poiss_weight, "delay": poiss_delay} ngpu.Connect(pg, neuron, pg_conn_dict, pg_syn_dict) -#connect neurons to spike detectors -sd_conn_dict={"rule": "one_to_one"} -sd_syn_dict={"weight": 1.0, "delay": 0.1} +# connect neurons to spike detectors +sd_conn_dict = {"rule": "one_to_one"} +sd_syn_dict = {"weight": 1.0, "delay": 0.1} ngpu.Connect(neuron, 
sd, sd_conn_dict, sd_syn_dict) # create multimeter record of spikes i_node_list = sd.ToList() -i_receptor_list = [0]*n_neurons -var_name_list = ["spike_height"]*n_neurons +i_receptor_list = [0] * n_neurons +var_name_list = ["spike_height"] * n_neurons -record = ngpu.CreateRecord("", var_name_list, i_node_list, \ - i_receptor_list) +record = ngpu.CreateRecord("", var_name_list, i_node_list, i_receptor_list) ngpu.Simulate(490) ngpu.SetStatus(pg, "rate", 0.0) @@ -55,70 +55,74 @@ data_list = ngpu.GetRecordData(record) row_sum = list(data_list[0]) -for row in data_list[1:len(data_list)]: +for row in data_list[1 : len(data_list)]: for i in range(len(row_sum)): row_sum[i] = row_sum[i] + row[i] - + spike_times = [] for i in range(len(neuron)): spike_times.append([]) - -for row in data_list[0:len(data_list)]: - for i in range(1,len(row)): + +for row in data_list[0 : len(data_list)]: + for i in range(1, len(row)): y = row[i] - if y>0.5: - #print(i, row[0]) - #print (spike_times) - spike_times[i-1].append(round(row[0]-0.2,4)) - + if y > 0.5: + # print(i, row[0]) + # print (spike_times) + spike_times[i - 1].append(round(row[0] - 0.2, 4)) + -spike = row_sum[1:len(row_sum)] -#print (spike) +spike = row_sum[1 : len(row_sum)] +# print (spike) spike_count = ngpu.GetStatus(neuron, "spike_count") -#print (spike_count) +# print (spike_count) -if (len(spike) != len(spike_count)): +if len(spike) != len(spike_count): print("Error: len(spike) != len(spike_count)") print("len(spike) ", len(spike)) - print("len(spike_count) ", len(spike_count)) + print("len(spike_count) ", len(spike_count)) sys.exit(1) - + for i in range(len(spike)): - #print spike_count[i][0] - #print (spike_count[i], spike[i]) + # print spike_count[i][0] + # print (spike_count[i], spike[i]) diff = spike[i] - spike_count[i][0] if abs(diff) > eps: print("Error: inconsistent number of spikes of node n. ", i) print("spike detector count ", spike[i]) print("node count ", spike_count[i][0]) sys.exit(1) - -if (len(spike_times) != len(neuron)): + +if len(spike_times) != len(neuron): print("Error: len(spike_times) != len(neuron)") print("len(spike_times) ", len(spike_times)) - print("len(neuron) ", len(neuron)) + print("len(neuron) ", len(neuron)) sys.exit(1) spike_times_list = ngpu.GetRecSpikeTimes(neuron) for j in range(len(neuron)): spike_times1 = spike_times_list[j] - #print (spike_times1) - #print (spike_times[j]) - if (len(spike_times1) != spike_count[j][0]): + # print (spike_times1) + # print (spike_times[j]) + if len(spike_times1) != spike_count[j][0]: print("Error: inconsistent number of spikes of node n. ", j) print("n. of recorded spike times ", len(spike_times1)) print("node count ", spike_count[j][0]) sys.exit(1) - + for i in range(len(spike_times1)): - spike_times1[i]=round(spike_times1[i],4) + spike_times1[i] = round(spike_times1[i], 4) diff = spike_times1[i] - spike_times[j][i] if abs(diff) > eps: - print("Error: inconsistent recorded spikes times of node n. ", j, \ - " spike n. ", i) + print( + "Error: inconsistent recorded spikes times of node n. ", + j, + " spike n. 
", + i, + ) print("multimeter spike time ", spike_times[j][i]) print("node recorded spike time ", spike_times1[i]) sys.exit(1) diff --git a/python/test/test_stdp.py b/python/test/test_stdp.py index 57de5baaf..8d00dba55 100644 --- a/python/test/test_stdp.py +++ b/python/test/test_stdp.py @@ -1,19 +1,20 @@ -import sys import math +import sys + import nestgpu as ngpu -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: + +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -31,19 +32,27 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ Wmax = 0.001 den_delay = 0.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) sg = ngpu.Create("spike_generator", N) -neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2*N) -ngpu.SetStatus(neuron, {"t_ref": 1000.0, "den_delay":den_delay}) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2 * N) +ngpu.SetStatus(neuron, {"t_ref": 1000.0, "den_delay": den_delay}) neuron0 = neuron[0:N] -neuron1 = neuron[N:2*N] +neuron1 = neuron[N : 2 * N] dt_list = [] for i in range(N): - dt_list.append(dt_step*(-0.5*(N-1) + i)) + dt_list.append(dt_step * (-0.5 * (N - 1) + i)) spike_time = [50.0] spike_height = [1.0] @@ -51,23 +60,26 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ time_diff = 400.0 # set spike times and height -ngpu.SetStatus(sg, {"spike_times": spike_time, "spike_heights":spike_height}) +ngpu.SetStatus(sg, {"spike_times": spike_time, "spike_heights": spike_height}) delay0 = 1.0 delay1 = delay0 + time_diff -weight_sg = 17.9 # to make it spike immediately and only once -weight_stdp = Wmax/2 +weight_sg = 17.9 # to make it spike immediately and only once +weight_stdp = Wmax / 2 -conn_dict={"rule": "one_to_one"} -syn_dict0={"weight":weight_sg, "delay":delay0} -syn_dict1={"weight":weight_sg, "delay":delay1} +conn_dict = {"rule": "one_to_one"} +syn_dict0 = {"weight": weight_sg, "delay": delay0} +syn_dict1 = {"weight": weight_sg, "delay": delay1} ngpu.Connect(sg, neuron0, conn_dict, syn_dict0) ngpu.Connect(sg, neuron1, conn_dict, syn_dict1) for i in range(N): delay_stdp = time_diff - dt_list[i] - syn_dict_stdp={"weight":weight_stdp, "delay":delay_stdp, \ - "synapse_group":syn_group} + syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay_stdp, + "synapse_group": syn_group, + } ngpu.Connect([neuron0[i]], [neuron1[i]], conn_dict, syn_dict_stdp) ngpu.Simulate(1000.0) @@ -80,11 +92,20 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ expect_w = [] dw = [] for i in range(N): - w1 = STDPUpdate(weight_stdp, dt[i], tau_plus, tau_minus, lambd*Wmax, alpha, \ - mu_plus, mu_minus, Wmax) + w1 = 
STDPUpdate( + weight_stdp, + dt[i], + tau_plus, + tau_minus, + lambd * Wmax, + alpha, + mu_plus, + mu_minus, + Wmax, + ) expect_w.append(w1) - dw.append(w1-w[i]) - if abs(dw[i])>tolerance: + dw.append(w1 - w[i]) + if abs(dw[i]) > tolerance: print("Expected weight: ", w1, " simulated: ", w[i]) sys.exit(1) diff --git a/python/test/test_stdp/cases/case1.py b/python/test/test_stdp/cases/case1.py index b96ca3033..3380743d2 100644 --- a/python/test/test_stdp/cases/case1.py +++ b/python/test/test_stdp/cases/case1.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_post = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_post, 20) -#spike generators +# spike generators sg_pre = ngpu.Create("spike_generator") sg_post = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_post, {"spike_times": spike_times_post}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 3.0 +den_delay = 3.0 weight_stdp = 1.0 delay = 1.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,20 +90,18 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = 1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = 3.0 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, 
alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w2) -print("dw/w: ", (w2 - w[0])/w2) +print("dw/w: ", (w2 - w[0]) / w2) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case10.py b/python/test/test_stdp/cases/case10.py index 85a0dd561..e26845081 100644 --- a/python/test/test_stdp/cases/case10.py +++ b/python/test/test_stdp/cases/case10.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_pre, 20) -#spike generators +# spike generators sg_post = ngpu.Create("spike_generator") sg_pre = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_pre, {"spike_times": spike_times_pre}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 1.0 +den_delay = 1.0 weight_stdp = 1.0 delay = 3.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,16 +90,15 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt = -2.0 -w1 = STDPUpdate(weight_stdp, Dt, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected 
theoretical weight: ", w1) -print("dw/w: ", (w1 - w[0])/w1) +print("dw/w: ", (w1 - w[0]) / w1) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case2.py b/python/test/test_stdp/cases/case2.py index 6e23cd7f2..519798b72 100644 --- a/python/test/test_stdp/cases/case2.py +++ b/python/test/test_stdp/cases/case2.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_post = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_post, 20) -#spike generators +# spike generators sg_pre = ngpu.Create("spike_generator") sg_post = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_post, {"spike_times": spike_times_post}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 3.0 +den_delay = 3.0 weight_stdp = 1.0 delay = 1.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,20 +90,18 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = -1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = 3.0 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ 
- alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w2) -print("dw/w: ", (w2 - w[0])/w2) +print("dw/w: ", (w2 - w[0]) / w2) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case3.py b/python/test/test_stdp/cases/case3.py index 9ce435a4b..80d15385a 100644 --- a/python/test/test_stdp/cases/case3.py +++ b/python/test/test_stdp/cases/case3.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_post = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_post, 20) -#spike generators +# spike generators sg_pre = ngpu.Create("spike_generator") sg_post = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_post, {"spike_times": spike_times_post}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 3.0 +den_delay = 3.0 weight_stdp = 1.0 delay = 1.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,28 +90,24 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = -1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = 
STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = -3.5 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt3 = 0.5 -w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt4 = 1.5 -w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w4) -print("dw/w: ", (w4 - w[0])/w4) +print("dw/w: ", (w4 - w[0]) / w4) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case4.py b/python/test/test_stdp/cases/case4.py index becad725c..e1b1e13f1 100644 --- a/python/test/test_stdp/cases/case4.py +++ b/python/test/test_stdp/cases/case4.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_post = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_post, 20) -#spike generators +# spike generators sg_pre = ngpu.Create("spike_generator") sg_post = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_post, {"spike_times": spike_times_post}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 3.0 +den_delay = 3.0 weight_stdp = 1.0 delay = 1.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": 
Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,28 +90,24 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = 1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = -3.5 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt3 = 0.5 -w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt4 = 1.5 -w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w4) -print("dw/w: ", (w4 - w[0])/w4) +print("dw/w: ", (w4 - w[0]) / w4) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case5.py b/python/test/test_stdp/cases/case5.py index d80f38406..8e678d553 100644 --- a/python/test/test_stdp/cases/case5.py +++ b/python/test/test_stdp/cases/case5.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_post = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_post, 20) -#spike generators +# spike generators sg_pre = ngpu.Create("spike_generator") sg_post = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_post, {"spike_times": spike_times_post}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 3.0 +den_delay = 
3.0 weight_stdp = 1.0 delay = 1.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,16 +90,15 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt = 2.0 -w1 = STDPUpdate(weight_stdp, Dt, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w1) -print("dw/w: ", (w1 - w[0])/w1) +print("dw/w: ", (w1 - w[0]) / w1) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_post[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case6.py b/python/test/test_stdp/cases/case6.py index 7b7c8bb7c..5ed73742b 100644 --- a/python/test/test_stdp/cases/case6.py +++ b/python/test/test_stdp/cases/case6.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_pre, 20) -#spike generators +# spike generators sg_post = ngpu.Create("spike_generator") sg_pre = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_pre, {"spike_times": spike_times_pre}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 1.0 +den_delay = 1.0 weight_stdp = 1.0 delay = 3.0 -syn_group = 
ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,20 +90,18 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = -1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = -3.0 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w2) -print("dw/w: ", (w2 - w[0])/w2) +print("dw/w: ", (w2 - w[0]) / w2) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case7.py b/python/test/test_stdp/cases/case7.py index 598bdf9c9..eeb0898cd 100644 --- a/python/test/test_stdp/cases/case7.py +++ b/python/test/test_stdp/cases/case7.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_pre, 20) -#spike generators +# spike generators sg_post = ngpu.Create("spike_generator") sg_pre = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_pre, {"spike_times": spike_times_pre}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, 
mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 1.0 +den_delay = 1.0 weight_stdp = 1.0 delay = 3.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,20 +90,18 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = 1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = -3.0 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w2) -print("dw/w: ", (w2 - w[0])/w2) +print("dw/w: ", (w2 - w[0]) / w2) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case8.py b/python/test/test_stdp/cases/case8.py index 1ff0c377b..240ccd5d2 100644 --- a/python/test/test_stdp/cases/case8.py +++ b/python/test/test_stdp/cases/case8.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_pre, 20) -#spike generators +# spike generators sg_post = ngpu.Create("spike_generator") sg_pre = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_pre, {"spike_times": spike_times_pre}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_post, neuron_post, conn_dict, 
syn_dict) ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 1.0 +den_delay = 1.0 weight_stdp = 1.0 delay = 3.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,28 +90,24 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = 1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = 3.5 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt3 = -0.5 -w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt4 = -1.5 -w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w4) -print("dw/w: ", (w4 - w[0])/w4) +print("dw/w: ", (w4 - w[0]) / w4) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/cases/case9.py b/python/test/test_stdp/cases/case9.py index 82cf9cd41..2894d12f9 100644 --- a/python/test/test_stdp/cases/case9.py +++ b/python/test/test_stdp/cases/case9.py @@ -1,22 +1,23 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu sim_time = 20.0 + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -27,7 +28,7 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron") ngpu.ActivateRecSpikeTimes(neuron_pre, 20) 
-#spike generators +# spike generators sg_post = ngpu.Create("spike_generator") sg_pre = ngpu.Create("spike_generator") @@ -39,8 +40,8 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ ngpu.SetStatus(sg_pre, {"spike_times": spike_times_pre}) # connect spike generators to neurons -syn_dict={"weight":1.0, "delay":1.0} -conn_dict={"rule": "one_to_one"} +syn_dict = {"weight": 1.0, "delay": 1.0} +conn_dict = {"rule": "one_to_one"} ngpu.Connect(sg_post, neuron_post, conn_dict, syn_dict) ngpu.Connect(sg_pre, neuron_pre, conn_dict, syn_dict) @@ -52,18 +53,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ mu_plus = 1.0 mu_minus = 1.0 Wmax = 10.0 -den_delay = 1.0 +den_delay = 1.0 weight_stdp = 1.0 delay = 3.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - -syn_dict_stdp={"weight":weight_stdp, "delay":delay, \ - "synapse_group":syn_group, "receptor":1} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + +syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay, + "synapse_group": syn_group, + "receptor": 1, +} ngpu.SetStatus(neuron_post, {"den_delay": den_delay}) @@ -77,28 +90,24 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ print("Initial weight: ", weight_stdp) print("Simulated weight: ", w[0]) -Wplus = Wmax*lambd +Wplus = Wmax * lambd Dt1 = -1.0 -w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w1 = STDPUpdate(weight_stdp, Dt1, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt2 = 3.5 -w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w2 = STDPUpdate(w1, Dt2, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt3 = -0.5 -w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w3 = STDPUpdate(w2, Dt3, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) Dt4 = -1.5 -w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) +w4 = STDPUpdate(w3, Dt4, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) print("Expected theoretical weight: ", w4) -print("dw/w: ", (w4 - w[0])/w4) +print("dw/w: ", (w4 - w[0]) / w4) -#spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) -#spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) -#print(spike_times0) -#print(spike_times1) +# spike_times0=ngpu.GetRecSpikeTimes(neuron_post[0]) +# spike_times1=ngpu.GetRecSpikeTimes(neuron_pre[0]) +# print(spike_times0) +# print(spike_times1) diff --git a/python/test/test_stdp/long_test/test_stdp.py b/python/test/test_stdp/long_test/test_stdp.py index aa12b54e9..d87edd6a8 100644 --- a/python/test/test_stdp/long_test/test_stdp.py +++ b/python/test/test_stdp/long_test/test_stdp.py @@ -1,33 +1,34 @@ -import nestgpu as ngpu import math + import matplotlib.pyplot as plt +import nestgpu as ngpu + # STDP weight update theoretical formula for comparison -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * 
math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 -Dt_offset = 2.0 # time difference between presynaptic and postsynaptic spike -N=400 # number of presynaptic and postsynaptic neurons -Dt_max = 5.0 # maximum axonal/dendritic delay +Dt_offset = 2.0 # time difference between presynaptic and postsynaptic spike +N = 400 # number of presynaptic and postsynaptic neurons +Dt_max = 5.0 # maximum axonal/dendritic delay sg_delay_m = 20.0 Dt_spike = 100.0 -n_spikes = 10; +n_spikes = 10 -sim_time = Dt_spike*(n_spikes + 2) +sim_time = Dt_spike * (n_spikes + 2) # STDP connection parameters tau_plus = 20.0 @@ -44,42 +45,54 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ neuron_pre = ngpu.Create("parrot_neuron", N) neuron_post = ngpu.Create("parrot_neuron", N) -#spike generator +# spike generator sg = ngpu.Create("spike_generator") # spike generator produces n_spikes spikes with time interval Dt_spike spike_times = [] for i in range(n_spikes): - spike_times.append(Dt_spike*(i+1)) - + spike_times.append(Dt_spike * (i + 1)) + ngpu.SetStatus(sg, {"spike_times": spike_times}) -#connect spike generator to parrot neurons -sg_conn_dict={"rule": "all_to_all"} -syn_dict_sg_pre={"weight":1.0, "delay":sg_delay_m-Dt_offset/2.0} +# connect spike generator to parrot neurons +sg_conn_dict = {"rule": "all_to_all"} +syn_dict_sg_pre = {"weight": 1.0, "delay": sg_delay_m - Dt_offset / 2.0} ngpu.Connect(sg, neuron_pre, sg_conn_dict, syn_dict_sg_pre) -syn_dict_sg_post={"weight":1.0, "delay":sg_delay_m+Dt_offset/2.0} +syn_dict_sg_post = {"weight": 1.0, "delay": sg_delay_m + Dt_offset / 2.0} ngpu.Connect(sg, neuron_post, sg_conn_dict, syn_dict_sg_post) -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) -conn_dict={"rule": "one_to_one"} +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) +conn_dict = {"rule": "one_to_one"} for j in range(N): - delay_post = 0.1 + round(Dt_max*j/N,1) + delay_post = 0.1 + round(Dt_max * j / N, 1) ngpu.SetStatus([neuron_post[j]], {"den_delay": delay_post}) for i in range(N): - delay_pre = 0.1 + round(Dt_max*i/N,1) - syn_dict_stdp={"weight":weight_stdp, "delay":delay_pre, \ - "synapse_group":syn_group, "receptor":1} - + delay_pre = 0.1 + round(Dt_max * i / N, 1) + syn_dict_stdp = { + "weight": weight_stdp, + "delay": delay_pre, + "synapse_group": syn_group, + "receptor": 1, + } + ngpu.Connect([neuron_pre[i]], [neuron_post[j]], conn_dict, syn_dict_stdp) ngpu.Simulate(sim_time) -Wplus = Wmax*lambd +Wplus = Wmax * lambd max_dw_rel = 0 mse = 0 count = 0 @@ -89,29 +102,37 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ w = ngpu.GetStatus(conn_id, "weight") # print("Initial weight: ", weight_stdp) # print("Simulated weight: ", w[0]) - delay_pre = 0.1 + round(Dt_max*i/N,1) - delay_post = 0.1 + round(Dt_max*j/N,1) + delay_pre = 0.1 + round(Dt_max * i / N, 1) + delay_post = 0.1 + round(Dt_max * j / N, 1) Dt = Dt_offset + delay_post - delay_pre - if Dt>=0: - Dt1 = -(Dt_spike 
- Dt) + if Dt >= 0: + Dt1 = -(Dt_spike - Dt) else: - Dt1 = Dt_spike + Dt - if (Dt > 1.0e-6) | (Dt<-1.0e-6): + Dt1 = Dt_spike + Dt + if (Dt > 1.0e-6) | (Dt < -1.0e-6): w1 = weight_stdp for ispike in range(n_spikes): - w1 = STDPUpdate(w1, Dt, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) - if ispike<n_spikes-1: - w1 = STDPUpdate(w1, Dt1, tau_plus, tau_minus, Wplus, \ - alpha, mu_plus, mu_minus, Wmax) - + w1 = STDPUpdate(w1, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax) + if ispike < n_spikes - 1: + w1 = STDPUpdate( + w1, + Dt1, + tau_plus, + tau_minus, + Wplus, + alpha, + mu_plus, + mu_minus, + Wmax, + ) + # print("Expected theoretical weight: ", w1) - dw_rel = (w1 - w[0])/w1 - mse = mse + (w1 - w[0])**2 + dw_rel = (w1 - w[0]) / w1 + mse = mse + (w1 - w[0]) ** 2 count = count + 1 - # print("dw/w: ", dw_rel) - if abs(dw_rel)>max_dw_rel: + # print("dw/w: ", dw_rel) + if abs(dw_rel) > max_dw_rel: max_dw_rel = abs(dw_rel) -mse = mse/count -print("max abs(dw/w): ", max_dw_rel) -print("rmse: ", math.sqrt(mse)) +mse = mse / count +print("max abs(dw/w): ", max_dw_rel) +print("rmse: ", math.sqrt(mse)) diff --git a/python/test/test_stdp_list.py b/python/test/test_stdp_list.py index e3a4d97d4..add8ff121 100644 --- a/python/test/test_stdp_list.py +++ b/python/test/test_stdp_list.py @@ -1,19 +1,20 @@ -import sys import math +import sys + import nestgpu as ngpu -def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ - Wmax): - if (Dt>=0): - fact = Wplus*math.exp(-Dt/tau_plus) - w1 = w + fact*math.pow(1.0 - w/Wmax, mu_plus) - if w1>Wmax: + +def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, Wmax): + if Dt >= 0: + fact = Wplus * math.exp(-Dt / tau_plus) + w1 = w + fact * math.pow(1.0 - w / Wmax, mu_plus) + if w1 > Wmax: w1 = Wmax - + else: - fact = -alpha*Wplus*math.exp(Dt/tau_minus) - w1 = w + fact*math.pow(w/Wmax, mu_minus) - if w1<0.0: + fact = -alpha * Wplus * math.exp(Dt / tau_minus) + w1 = w + fact * math.pow(w / Wmax, mu_minus) + if w1 < 0.0: w1 = 0.0 return w1 @@ -31,22 +32,30 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ Wmax = 0.001 den_delay = 0.0 -syn_group = ngpu.CreateSynGroup \ - ("stdp", {"tau_plus":tau_plus, "tau_minus":tau_minus, \ - "lambda":lambd, "alpha":alpha, "mu_plus":mu_plus, \ - "mu_minus":mu_minus, "Wmax":Wmax}) - +syn_group = ngpu.CreateSynGroup( + "stdp", + { + "tau_plus": tau_plus, + "tau_minus": tau_minus, + "lambda": lambd, + "alpha": alpha, + "mu_plus": mu_plus, + "mu_minus": mu_minus, + "Wmax": Wmax, + }, +) + sg = ngpu.Create("spike_generator") neuron0 = ngpu.Create("aeif_cond_beta_multisynapse") neuron1 = ngpu.Create("aeif_cond_beta_multisynapse", N) -ngpu.SetStatus(neuron1, {"t_ref": 1000.0, "den_delay":den_delay}) +ngpu.SetStatus(neuron1, {"t_ref": 1000.0, "den_delay": den_delay}) time_diff = 400.0 dt_list = [] delay_stdp_list = [] for i in range(N): - dt_list.append(dt_step*(-0.5*(N-1) + i)) + dt_list.append(dt_step * (-0.5 * (N - 1) + i)) delay_stdp_list.append(time_diff - dt_list[i]) spike_times = [50.0] @@ -55,31 +64,34 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ # set spike times and height -ngpu.SetStatus(sg, {"spike_times": spike_times, "spike_heights":spike_heights}) +ngpu.SetStatus(sg, {"spike_times": spike_times, "spike_heights": spike_heights}) delay0 = 1.0 delay1 = delay0 + time_diff -weight_sg = 17.9 # to make it spike immediately and only once -weight_stdp = Wmax/2 +weight_sg = 17.9 # to make it spike 
immediately and only once +weight_stdp = Wmax / 2 -conn_dict={"rule": "one_to_one"} -conn_dict_full={"rule": "all_to_all"} -syn_dict0={"weight":weight_sg, "delay":delay0} -syn_dict1={"weight":weight_sg, "delay":delay1} +conn_dict = {"rule": "one_to_one"} +conn_dict_full = {"rule": "all_to_all"} +syn_dict0 = {"weight": weight_sg, "delay": delay0} +syn_dict1 = {"weight": weight_sg, "delay": delay1} ngpu.Connect(sg, neuron0, conn_dict, syn_dict0) ngpu.Connect(sg, neuron1, conn_dict_full, syn_dict1) -syn_dict_stdp={"weight":weight_stdp, "delay_array":delay_stdp_list, \ - "synapse_group":syn_group} +syn_dict_stdp = { + "weight": weight_stdp, + "delay_array": delay_stdp_list, + "synapse_group": syn_group, +} ngpu.Connect(neuron0, neuron1, conn_dict_full, syn_dict_stdp) ngpu.Simulate(1000.0) -#conn_id = ngpu.GetConnections(neuron0, neuron1) +# conn_id = ngpu.GetConnections(neuron0, neuron1) dt = dt_list -#w = ngpu.GetStatus(conn_id, "weight") +# w = ngpu.GetStatus(conn_id, "weight") expect_w = [] @@ -88,26 +100,35 @@ def STDPUpdate(w, Dt, tau_plus, tau_minus, Wplus, alpha, mu_plus, mu_minus, \ for i in range(N): conn_id = ngpu.GetConnections(neuron0, neuron1[i]) w = ngpu.GetStatus(conn_id, "weight") - w1 = STDPUpdate(weight_stdp, dt[i], tau_plus, tau_minus, Wmax*lambd, alpha, \ - mu_plus, mu_minus, Wmax) + w1 = STDPUpdate( + weight_stdp, + dt[i], + tau_plus, + tau_minus, + Wmax * lambd, + alpha, + mu_plus, + mu_minus, + Wmax, + ) expect_w.append(w1) sim_w.append(w[0]) - dw.append(w1-w[0]) - if abs(dw[i])>tolerance: + dw.append(w1 - w[0]) + if abs(dw[i]) > tolerance: print("Expected weight: ", w1, " simulated: ", w) sys.exit(1) sys.exit(0) -#import matplotlib.pyplot as plt +# import matplotlib.pyplot as plt -#plt.figure(1) -#plt.plot(dt, sim_w) +# plt.figure(1) +# plt.plot(dt, sim_w) -#plt.figure(2) -#plt.plot(dt, expect_w) +# plt.figure(2) +# plt.plot(dt, expect_w) -#plt.draw() -#plt.pause(1) -#raw_input("<Hit Enter To Close>") -#plt.close() +# plt.draw() +# plt.pause(1) +# raw_input("<Hit Enter To Close>") +# plt.close() diff --git a/python/test/test_syn_group.py b/python/test/test_syn_group.py index 08785708e..452833385 100644 --- a/python/test/test_syn_group.py +++ b/python/test/test_syn_group.py @@ -1,13 +1,14 @@ import sys + import nestgpu as ngpu -syn_group = ngpu.CreateSynGroup("test_syn_model", {"fact":1.0, "offset":2.0}) +syn_group = ngpu.CreateSynGroup("test_syn_model", {"fact": 1.0, "offset": 2.0}) print(ngpu.GetStatus(syn_group)) ngpu.SetStatus(syn_group, "fact", 3.0) ngpu.SetStatus(syn_group, "offset", 4.0) print(ngpu.GetStatus(syn_group)) -ngpu.SetStatus(syn_group, {"fact":5.0, "offset":6.0}) +ngpu.SetStatus(syn_group, {"fact": 5.0, "offset": 6.0}) print(ngpu.GetStatus(syn_group)) fact = ngpu.GetSynGroupParam(syn_group, "fact") @@ -22,5 +23,3 @@ print(ngpu.GetStatus(syn_group, ["fact", "offset"])) print(ngpu.GetStatus(syn_group, ["offset", "fact"])) - - diff --git a/python/test/test_syn_model.py b/python/test/test_syn_model.py index b0d1e111b..ce7e91b8a 100644 --- a/python/test/test_syn_model.py +++ b/python/test/test_syn_model.py @@ -1,4 +1,5 @@ import sys + import nestgpu as ngpu tolerance = 1.0e-6 @@ -12,13 +13,13 @@ ngpu.SetSynGroupParam(syn_group, "offset", offset) sg = ngpu.Create("spike_generator", N) -neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2*N) +neuron = ngpu.Create("aeif_cond_beta_multisynapse", 2 * N) ngpu.SetStatus(neuron, {"t_ref": 10.0}) neuron0 = neuron[0:N] -neuron1 = neuron[N:2*N] +neuron1 = neuron[N : 2 * N] dt_list = [] for i in range(N): - 
dt_list.append(dt_step*(-0.5*(N-1) + i)) + dt_list.append(dt_step * (-0.5 * (N - 1) + i)) spike_time = [50.0] spike_height = [1.0] @@ -26,36 +27,39 @@ time_diff = 10.0 # set spike times and height -ngpu.SetStatus(sg, {"spike_times": spike_time, "spike_heights":spike_height}) +ngpu.SetStatus(sg, {"spike_times": spike_time, "spike_heights": spike_height}) delay0 = 1.0 delay1 = delay0 + time_diff weight_sg = 17.9 weight_test = 0.0 -conn_dict={"rule": "one_to_one"} -syn_dict0={"weight":weight_sg, "delay":delay0, "receptor":0, "synapse_group":0} -syn_dict1={"weight":weight_sg, "delay":delay1, "receptor":0, "synapse_group":0} +conn_dict = {"rule": "one_to_one"} +syn_dict0 = {"weight": weight_sg, "delay": delay0, "receptor": 0, "synapse_group": 0} +syn_dict1 = {"weight": weight_sg, "delay": delay1, "receptor": 0, "synapse_group": 0} ngpu.Connect(sg, neuron0, conn_dict, syn_dict0) ngpu.Connect(sg, neuron1, conn_dict, syn_dict1) for i in range(N): delay_test = time_diff - dt_list[i] - syn_dict_test={"weight":weight_test, "delay":delay_test, "receptor":0, \ - "synapse_group":syn_group} + syn_dict_test = { + "weight": weight_test, + "delay": delay_test, + "receptor": 0, + "synapse_group": syn_group, + } ngpu.Connect([neuron0[i]], [neuron1[i]], conn_dict, syn_dict_test) ngpu.Simulate(200.0) conn_id = ngpu.GetConnections(neuron0, neuron1) conn_status_dict = ngpu.GetStatus(conn_id, ["weight", "delay"]) -#print (conn_status_dict) +# print (conn_status_dict) for i in range(N): - #print dt_list[i], conn_status_dict[i][0] - expect_w = dt_list[i]*fact + offset - if abs(expect_w - conn_status_dict[i][0])>tolerance: - print("Expected weight: ", expect_w, " simulated: ", \ - conn_status_dict[i][0]) + # print dt_list[i], conn_status_dict[i][0] + expect_w = dt_list[i] * fact + offset + if abs(expect_w - conn_status_dict[i][0]) > tolerance: + print("Expected weight: ", expect_w, " simulated: ", conn_status_dict[i][0]) sys.exit(1) sys.exit(0) diff --git a/python/test/test_t_ref.py b/python/test/test_t_ref.py index 98d7ddbb9..40505b78b 100644 --- a/python/test/test_t_ref.py +++ b/python/test/test_t_ref.py @@ -1,15 +1,15 @@ import nestgpu as ngpu neuron = ngpu.Create("aeif_cond_beta_multisynapse", 1, 1) -ngpu.SetStatus(neuron, {"t_ref":100.0, "I_e":1000.0}) +ngpu.SetStatus(neuron, {"t_ref": 100.0, "I_e": 1000.0}) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) ngpu.Simulate() data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] import matplotlib.pyplot as plt diff --git a/python/test/test_user_m1.py b/python/test/test_user_m1.py index a238f6c4a..7c3fc24d7 100644 --- a/python/test/test_user_m1.py +++ b/python/test/test_user_m1.py @@ -1,27 +1,34 @@ import sys + import nestgpu as ngpu -neuron = ngpu.Create('user_m1', 1, 3) -ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b":80.5, - "E_L":-70.6, "g_L":300.0}) -ngpu.SetStatus(neuron, {'E_rev':[20.0, 0.0, -85.0], - 'tau_decay':[40.0, 20.0, 30.0], - 'tau_rise':[20.0, 10.0, 5.0]}) +neuron = ngpu.Create("user_m1", 1, 3) +ngpu.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b": 80.5, "E_L": -70.6, "g_L": 300.0}) +ngpu.SetStatus( + neuron, + { + "E_rev": [20.0, 0.0, -85.0], + "tau_decay": [40.0, 20.0, 30.0], + "tau_rise": [20.0, 10.0, 5.0], + }, +) spike = ngpu.Create("spike_generator") spike_times = [10.0, 400.0] spike_heights = [1.0, 0.5] n_spikes = 2 # set spike times and height -ngpu.SetStatus(spike, {"spike_times": spike_times, \ - 
"spike_heights":spike_heights}) +ngpu.SetStatus(spike, {"spike_times": spike_times, "spike_heights": spike_heights}) delay = [1.0, 100.0, 130.0] weight = [0.1, 0.2, 0.15] -conn_spec={"rule": "all_to_all"} +conn_spec = {"rule": "all_to_all"} for syn in range(3): - syn_spec={ #'model': 'static_synapse', 'receptor_type': syn, - 'receptor': syn, 'weight': weight[syn], 'delay': delay[syn]} + syn_spec = { #'model': 'static_synapse', 'receptor_type': syn, + "receptor": syn, + "weight": weight[syn], + "delay": delay[syn], + } ngpu.Connect(spike, neuron, conn_spec, syn_spec) record = ngpu.CreateRecord("", ["V_m"], [neuron[0]], [0]) @@ -29,8 +36,8 @@ ngpu.Simulate(800.0) data_list = ngpu.GetRecordData(record) -t=[row[0] for row in data_list] -V_m=[row[1] for row in data_list] +t = [row[0] for row in data_list] +V_m = [row[1] for row in data_list] import matplotlib.pyplot as plt diff --git a/pythonlib/Makefile.in b/pythonlib/Makefile.in index 74005c307..2bf840ac5 100644 --- a/pythonlib/Makefile.in +++ b/pythonlib/Makefile.in @@ -109,11 +109,11 @@ am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = +am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ -am__v_at_1 = +am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ diff --git a/pythonlib/nestgpu.py b/pythonlib/nestgpu.py index 6d10acdea..9406d9255 100644 --- a/pythonlib/nestgpu.py +++ b/pythonlib/nestgpu.py @@ -1,21 +1,22 @@ """ Python interface for NESTGPU""" -import sys, platform -import ctypes, ctypes.util +import ctypes +import ctypes.util +import gc import os +import platform +import sys import unicodedata -import gc - -print('\n -- NEST GPU --\n') -print(' Copyright (C) 2021 The NEST Initiative\n') -print(' This program is provided AS IS and comes with') -print(' NO WARRANTY. See the file LICENSE for details.\n') -print(' Homepage: https://github.com/nest/nest-gpu') +print("\n -- NEST GPU --\n") +print(" Copyright (C) 2021 The NEST Initiative\n") +print(" This program is provided AS IS and comes with") +print(" NO WARRANTY. 
See the file LICENSE for details.\n") +print(" Homepage: https://github.com/nest/nest-gpu") print() -lib_path=os.environ["NESTGPU_LIB"] -_nestgpu=ctypes.CDLL(lib_path) +lib_path = os.environ["NESTGPU_LIB"] +_nestgpu = ctypes.CDLL(lib_path) c_float_p = ctypes.POINTER(ctypes.c_float) c_int_p = ctypes.POINTER(ctypes.c_int) @@ -25,6 +26,7 @@ c_float_pp = ctypes.POINTER(ctypes.POINTER(ctypes.c_float)) c_float_ppp = ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))) + class NodeSeq(object): def __init__(self, i0, n=1): if i0 == None: @@ -34,93 +36,119 @@ def __init__(self, i0, n=1): self.n = n def Subseq(self, first, last): - if last<0 and last>=-self.n: - last = last%self.n - if first<0 | last<first: + if last < 0 and last >= -self.n: + last = last % self.n + if first < 0 | last < first: raise ValueError("Sequence subset range error") - if last>=self.n: + if last >= self.n: raise ValueError("Sequence subset out of range") return NodeSeq(self.i0 + first, last - first + 1) + def __getitem__(self, i): - if type(i)==slice: + if type(i) == slice: if i.step != None: raise ValueError("Subsequence cannot have a step") - return self.Subseq(i.start, i.stop-1) - - if i<-self.n: + return self.Subseq(i.start, i.stop - 1) + + if i < -self.n: raise ValueError("Sequence index error") - if i>=self.n: + if i >= self.n: raise ValueError("Sequence index out of range") - if i<0: - i = i%self.n + if i < 0: + i = i % self.n return self.i0 + i + def ToList(self): return list(range(self.i0, self.i0 + self.n)) + def __len__(self): return self.n + class RemoteNodeSeq(object): def __init__(self, i_host=0, node_seq=NodeSeq(None)): self.i_host = i_host self.node_seq = node_seq + class ConnectionId(object): def __init__(self, i_source, i_group, i_conn): self.i_source = i_source self.i_group = i_group self.i_conn = i_conn + class SynGroup(object): def __init__(self, i_syn_group): self.i_syn_group = i_syn_group + def to_byte_str(s): - if type(s)==str: - return s.encode('ascii') - elif type(s)==bytes: + if type(s) == str: + return s.encode("ascii") + elif type(s) == bytes: return s else: raise ValueError("Variable cannot be converted to string") + def to_def_str(s): - if (sys.version_info >= (3, 0)): + if sys.version_info >= (3, 0): return s.decode("utf-8") else: return s + def waitenter(val): - if (sys.version_info >= (3, 0)): + if sys.version_info >= (3, 0): return input(val) else: return raw_input(val) - -conn_rule_name = ("one_to_one", "all_to_all", "fixed_total_number", - "fixed_indegree", "fixed_outdegree") - + + +conn_rule_name = ( + "one_to_one", + "all_to_all", + "fixed_total_number", + "fixed_indegree", + "fixed_outdegree", +) + NESTGPU_GetErrorMessage = _nestgpu.NESTGPU_GetErrorMessage NESTGPU_GetErrorMessage.restype = ctypes.POINTER(ctypes.c_char) + + def GetErrorMessage(): "Get error message from NESTGPU exception" message = ctypes.cast(NESTGPU_GetErrorMessage(), ctypes.c_char_p).value return message - + + NESTGPU_GetErrorCode = _nestgpu.NESTGPU_GetErrorCode NESTGPU_GetErrorCode.restype = ctypes.c_ubyte + + def GetErrorCode(): "Get error code from NESTGPU exception" return NESTGPU_GetErrorCode() - + + NESTGPU_SetOnException = _nestgpu.NESTGPU_SetOnException NESTGPU_SetOnException.argtypes = (ctypes.c_int,) + + def SetOnException(on_exception): "Define whether handle exceptions (1) or exit (0) in case of errors" return NESTGPU_SetOnException(ctypes.c_int(on_exception)) + SetOnException(1) NESTGPU_SetRandomSeed = _nestgpu.NESTGPU_SetRandomSeed NESTGPU_SetRandomSeed.argtypes = (ctypes.c_ulonglong,) 
NESTGPU_SetRandomSeed.restype = ctypes.c_int + + def SetRandomSeed(seed): "Set seed for random number generation" ret = NESTGPU_SetRandomSeed(ctypes.c_ulonglong(seed)) @@ -132,6 +160,8 @@ def SetRandomSeed(seed): NESTGPU_SetTimeResolution = _nestgpu.NESTGPU_SetTimeResolution NESTGPU_SetTimeResolution.argtypes = (ctypes.c_float,) NESTGPU_SetTimeResolution.restype = ctypes.c_int + + def SetTimeResolution(time_res): "Set time resolution in ms" ret = NESTGPU_SetTimeResolution(ctypes.c_float(time_res)) @@ -139,8 +169,11 @@ def SetTimeResolution(time_res): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetTimeResolution = _nestgpu.NESTGPU_GetTimeResolution NESTGPU_GetTimeResolution.restype = ctypes.c_float + + def GetTimeResolution(): "Get time resolution in ms" ret = NESTGPU_GetTimeResolution() @@ -152,6 +185,8 @@ def GetTimeResolution(): NESTGPU_SetMaxSpikeBufferSize = _nestgpu.NESTGPU_SetMaxSpikeBufferSize NESTGPU_SetMaxSpikeBufferSize.argtypes = (ctypes.c_int,) NESTGPU_SetMaxSpikeBufferSize.restype = ctypes.c_int + + def SetMaxSpikeBufferSize(max_size): "Set maximum size of spike buffer per node" ret = NESTGPU_SetMaxSpikeBufferSize(ctypes.c_int(max_size)) @@ -162,6 +197,8 @@ def SetMaxSpikeBufferSize(max_size): NESTGPU_GetMaxSpikeBufferSize = _nestgpu.NESTGPU_GetMaxSpikeBufferSize NESTGPU_GetMaxSpikeBufferSize.restype = ctypes.c_int + + def GetMaxSpikeBufferSize(): "Get maximum size of spike buffer per node" ret = NESTGPU_GetMaxSpikeBufferSize() @@ -173,6 +210,8 @@ def GetMaxSpikeBufferSize(): NESTGPU_SetSimTime = _nestgpu.NESTGPU_SetSimTime NESTGPU_SetSimTime.argtypes = (ctypes.c_float,) NESTGPU_SetSimTime.restype = ctypes.c_int + + def SetSimTime(sim_time): "Set neural activity simulated time in ms" ret = NESTGPU_SetSimTime(ctypes.c_float(sim_time)) @@ -184,6 +223,8 @@ def SetSimTime(sim_time): NESTGPU_SetVerbosityLevel = _nestgpu.NESTGPU_SetVerbosityLevel NESTGPU_SetVerbosityLevel.argtypes = (ctypes.c_int,) NESTGPU_SetVerbosityLevel.restype = ctypes.c_int + + def SetVerbosityLevel(verbosity_level): "Set verbosity level" ret = NESTGPU_SetVerbosityLevel(ctypes.c_int(verbosity_level)) @@ -195,18 +236,20 @@ def SetVerbosityLevel(verbosity_level): NESTGPU_Create = _nestgpu.NESTGPU_Create NESTGPU_Create.argtypes = (c_char_p, ctypes.c_int, ctypes.c_int) NESTGPU_Create.restype = ctypes.c_int + + def Create(model_name, n_node=1, n_ports=1, status_dict=None): "Create a neuron group" - if (type(status_dict)==dict): + if type(status_dict) == dict: node_group = Create(model_name, n_node, n_ports) SetStatus(node_group, status_dict) return node_group - - elif status_dict!=None: + + elif status_dict != None: raise ValueError("Wrong argument in Create") - - c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name)+1) - i_node =NESTGPU_Create(c_model_name, ctypes.c_int(n_node), ctypes.c_int(n_ports)) + + c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name) + 1) + i_node = NESTGPU_Create(c_model_name, ctypes.c_int(n_node), ctypes.c_int(n_ports)) ret = NodeSeq(i_node, n_node) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -216,9 +259,11 @@ def Create(model_name, n_node=1, n_ports=1, status_dict=None): NESTGPU_CreatePoissonGenerator = _nestgpu.NESTGPU_CreatePoissonGenerator NESTGPU_CreatePoissonGenerator.argtypes = (ctypes.c_int, ctypes.c_float) NESTGPU_CreatePoissonGenerator.restype = ctypes.c_int + + def CreatePoissonGenerator(n_node, rate): "Create a poisson-distributed spike generator" - i_node = 
NESTGPU_CreatePoissonGenerator(ctypes.c_int(n_node), ctypes.c_float(rate)) + i_node = NESTGPU_CreatePoissonGenerator(ctypes.c_int(n_node), ctypes.c_float(rate)) ret = NodeSeq(i_node, n_node) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -226,24 +271,34 @@ def CreatePoissonGenerator(n_node, rate): NESTGPU_CreateRecord = _nestgpu.NESTGPU_CreateRecord -NESTGPU_CreateRecord.argtypes = (c_char_p, ctypes.POINTER(c_char_p), c_int_p, c_int_p, ctypes.c_int) +NESTGPU_CreateRecord.argtypes = ( + c_char_p, + ctypes.POINTER(c_char_p), + c_int_p, + c_int_p, + ctypes.c_int, +) NESTGPU_CreateRecord.restype = ctypes.c_int + + def CreateRecord(file_name, var_name_list, i_node_list, i_port_list): "Create a record of neuron variables" n_node = len(i_node_list) - c_file_name = ctypes.create_string_buffer(to_byte_str(file_name), len(file_name)+1) + c_file_name = ctypes.create_string_buffer(to_byte_str(file_name), len(file_name) + 1) array_int_type = ctypes.c_int * n_node array_char_pt_type = c_char_p * n_node - c_var_name_list=[] + c_var_name_list = [] for i in range(n_node): - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name_list[i]), len(var_name_list[i])+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name_list[i]), len(var_name_list[i]) + 1) c_var_name_list.append(c_var_name) - ret = NESTGPU_CreateRecord(c_file_name, - array_char_pt_type(*c_var_name_list), - array_int_type(*i_node_list), - array_int_type(*i_port_list), - ctypes.c_int(n_node)) + ret = NESTGPU_CreateRecord( + c_file_name, + array_char_pt_type(*c_var_name_list), + array_int_type(*i_node_list), + array_int_type(*i_port_list), + ctypes.c_int(n_node), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -252,6 +307,8 @@ def CreateRecord(file_name, var_name_list, i_node_list, i_port_list): NESTGPU_GetRecordDataRows = _nestgpu.NESTGPU_GetRecordDataRows NESTGPU_GetRecordDataRows.argtypes = (ctypes.c_int,) NESTGPU_GetRecordDataRows.restype = ctypes.c_int + + def GetRecordDataRows(i_record): "Get record n. of rows" ret = NESTGPU_GetRecordDataRows(ctypes.c_int(i_record)) @@ -263,6 +320,8 @@ def GetRecordDataRows(i_record): NESTGPU_GetRecordDataColumns = _nestgpu.NESTGPU_GetRecordDataColumns NESTGPU_GetRecordDataColumns.argtypes = (ctypes.c_int,) NESTGPU_GetRecordDataColumns.restype = ctypes.c_int + + def GetRecordDataColumns(i_record): "Get record n. 
of columns" ret = NESTGPU_GetRecordDataColumns(ctypes.c_int(i_record)) @@ -274,6 +333,8 @@ def GetRecordDataColumns(i_record): NESTGPU_GetRecordData = _nestgpu.NESTGPU_GetRecordData NESTGPU_GetRecordData.argtypes = (ctypes.c_int,) NESTGPU_GetRecordData.restype = ctypes.POINTER(c_float_p) + + def GetRecordData(i_record): "Get record data" data_arr_pt = NESTGPU_GetRecordData(ctypes.c_int(i_record)) @@ -284,85 +345,111 @@ def GetRecordData(i_record): row_list = [] for ic in range(nc): row_list.append(data_arr_pt[ir][ic]) - + data_list.append(row_list) - - ret = data_list + + ret = data_list if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronScalParam = _nestgpu.NESTGPU_SetNeuronScalParam -NESTGPU_SetNeuronScalParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronScalParam.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronScalParam.restype = ctypes.c_int + + def SetNeuronScalParam(i_node, n_node, param_name, val): "Set neuron scalar parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = NESTGPU_SetNeuronScalParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name, - ctypes.c_float(val)) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SetNeuronScalParam(ctypes.c_int(i_node), ctypes.c_int(n_node), c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronArrayParam = _nestgpu.NESTGPU_SetNeuronArrayParam -NESTGPU_SetNeuronArrayParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, c_float_p, ctypes.c_int) +NESTGPU_SetNeuronArrayParam.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronArrayParam.restype = ctypes.c_int + + def SetNeuronArrayParam(i_node, n_node, param_name, param_list): "Set neuron array parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) array_size = len(param_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronArrayParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name, - array_float_type(*param_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronArrayParam( + ctypes.c_int(i_node), + ctypes.c_int(n_node), + c_param_name, + array_float_type(*param_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtScalParam = _nestgpu.NESTGPU_SetNeuronPtScalParam -NESTGPU_SetNeuronPtScalParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronPtScalParam.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronPtScalParam.restype = ctypes.c_int + + def SetNeuronPtScalParam(nodes, param_name, val): "Set neuron list scalar parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - ret = NESTGPU_SetNeuronPtScalParam(node_pt, - ctypes.c_int(n_node), c_param_name, - ctypes.c_float(val)) + ret = NESTGPU_SetNeuronPtScalParam(node_pt, 
ctypes.c_int(n_node), c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtArrayParam = _nestgpu.NESTGPU_SetNeuronPtArrayParam -NESTGPU_SetNeuronPtArrayParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, c_float_p, - ctypes.c_int) +NESTGPU_SetNeuronPtArrayParam.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronPtArrayParam.restype = ctypes.c_int + + def SetNeuronPtArrayParam(nodes, param_name, param_list): "Set neuron list array parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - + array_size = len(param_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronPtArrayParam(node_pt, - ctypes.c_int(n_node), - c_param_name, - array_float_type(*param_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronPtArrayParam( + node_pt, + ctypes.c_int(n_node), + c_param_name, + array_float_type(*param_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -371,11 +458,12 @@ def SetNeuronPtArrayParam(nodes, param_name, param_list): NESTGPU_IsNeuronScalParam = _nestgpu.NESTGPU_IsNeuronScalParam NESTGPU_IsNeuronScalParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronScalParam.restype = ctypes.c_int + + def IsNeuronScalParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsNeuronScalParam(ctypes.c_int(i_node), c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronScalParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -384,144 +472,179 @@ def IsNeuronScalParam(i_node, param_name): NESTGPU_IsNeuronPortParam = _nestgpu.NESTGPU_IsNeuronPortParam NESTGPU_IsNeuronPortParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronPortParam.restype = ctypes.c_int + + def IsNeuronPortParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_IsNeuronPortParam(ctypes.c_int(i_node), c_param_name)!= 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronPortParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_IsNeuronArrayParam = _nestgpu.NESTGPU_IsNeuronArrayParam NESTGPU_IsNeuronArrayParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronArrayParam.restype = ctypes.c_int + + def IsNeuronArrayParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_IsNeuronArrayParam(ctypes.c_int(i_node), c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronArrayParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_IsNeuronGroupParam = _nestgpu.NESTGPU_IsNeuronGroupParam 
NESTGPU_IsNeuronGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronGroupParam.restype = ctypes.c_int + + def IsNeuronGroupParam(i_node, param_name): "Check name of neuron scalar parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsNeuronGroupParam(ctypes.c_int(i_node), c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsNeuronGroupParam(ctypes.c_int(i_node), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronIntVar = _nestgpu.NESTGPU_SetNeuronIntVar -NESTGPU_SetNeuronIntVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, ctypes.c_int) +NESTGPU_SetNeuronIntVar.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p, ctypes.c_int) NESTGPU_SetNeuronIntVar.restype = ctypes.c_int + + def SetNeuronIntVar(i_node, n_node, var_name, val): "Set neuron integer variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = NESTGPU_SetNeuronIntVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name, - ctypes.c_int(val)) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_SetNeuronIntVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronScalVar = _nestgpu.NESTGPU_SetNeuronScalVar -NESTGPU_SetNeuronScalVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronScalVar.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronScalVar.restype = ctypes.c_int + + def SetNeuronScalVar(i_node, n_node, var_name, val): "Set neuron scalar variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = NESTGPU_SetNeuronScalVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name, - ctypes.c_float(val)) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_SetNeuronScalVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronArrayVar = _nestgpu.NESTGPU_SetNeuronArrayVar -NESTGPU_SetNeuronArrayVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p, c_float_p, ctypes.c_int) +NESTGPU_SetNeuronArrayVar.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronArrayVar.restype = ctypes.c_int + + def SetNeuronArrayVar(i_node, n_node, var_name, var_list): "Set neuron array variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) array_size = len(var_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronArrayVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name, - array_float_type(*var_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronArrayVar( + ctypes.c_int(i_node), + ctypes.c_int(n_node), + c_var_name, + array_float_type(*var_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtIntVar = _nestgpu.NESTGPU_SetNeuronPtIntVar -NESTGPU_SetNeuronPtIntVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, ctypes.c_int) +NESTGPU_SetNeuronPtIntVar.argtypes = ( 
+ ctypes.c_void_p, + ctypes.c_int, + c_char_p, + ctypes.c_int, +) NESTGPU_SetNeuronPtIntVar.restype = ctypes.c_int + + def SetNeuronPtIntVar(nodes, var_name, val): "Set neuron list integer variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - ret = NESTGPU_SetNeuronPtIntVar(node_pt, - ctypes.c_int(n_node), c_var_name, - ctypes.c_int(val)) + ret = NESTGPU_SetNeuronPtIntVar(node_pt, ctypes.c_int(n_node), c_var_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtScalVar = _nestgpu.NESTGPU_SetNeuronPtScalVar -NESTGPU_SetNeuronPtScalVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, ctypes.c_float) +NESTGPU_SetNeuronPtScalVar.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + ctypes.c_float, +) NESTGPU_SetNeuronPtScalVar.restype = ctypes.c_int + + def SetNeuronPtScalVar(nodes, var_name, val): "Set neuron list scalar variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - ret = NESTGPU_SetNeuronPtScalVar(node_pt, - ctypes.c_int(n_node), c_var_name, - ctypes.c_float(val)) + ret = NESTGPU_SetNeuronPtScalVar(node_pt, ctypes.c_int(n_node), c_var_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_SetNeuronPtArrayVar = _nestgpu.NESTGPU_SetNeuronPtArrayVar -NESTGPU_SetNeuronPtArrayVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p, c_float_p, - ctypes.c_int) +NESTGPU_SetNeuronPtArrayVar.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + c_char_p, + c_float_p, + ctypes.c_int, +) NESTGPU_SetNeuronPtArrayVar.restype = ctypes.c_int + + def SetNeuronPtArrayVar(nodes, var_name, var_list): "Set neuron list array variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) array_size = len(var_list) array_float_type = ctypes.c_float * array_size - ret = NESTGPU_SetNeuronPtArrayVar(node_pt, - ctypes.c_int(n_node), - c_var_name, - array_float_type(*var_list), - ctypes.c_int(array_size)) + ret = NESTGPU_SetNeuronPtArrayVar( + node_pt, + ctypes.c_int(n_node), + c_var_name, + array_float_type(*var_list), + ctypes.c_int(array_size), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -530,11 +653,12 @@ def SetNeuronPtArrayVar(nodes, var_name, var_list): NESTGPU_IsNeuronIntVar = _nestgpu.NESTGPU_IsNeuronIntVar NESTGPU_IsNeuronIntVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronIntVar.restype = ctypes.c_int + + def IsNeuronIntVar(i_node, var_name): "Check name of neuron integer variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - ret = (NESTGPU_IsNeuronIntVar(ctypes.c_int(i_node), c_var_name)!=0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronIntVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise 
ValueError(GetErrorMessage()) return ret @@ -543,11 +667,12 @@ def IsNeuronIntVar(i_node, var_name): NESTGPU_IsNeuronScalVar = _nestgpu.NESTGPU_IsNeuronScalVar NESTGPU_IsNeuronScalVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronScalVar.restype = ctypes.c_int + + def IsNeuronScalVar(i_node, var_name): "Check name of neuron scalar variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - ret = (NESTGPU_IsNeuronScalVar(ctypes.c_int(i_node), c_var_name)!=0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronScalVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -556,21 +681,26 @@ def IsNeuronScalVar(i_node, var_name): NESTGPU_IsNeuronPortVar = _nestgpu.NESTGPU_IsNeuronPortVar NESTGPU_IsNeuronPortVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronPortVar.restype = ctypes.c_int + + def IsNeuronPortVar(i_node, var_name): "Check name of neuron scalar variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = (NESTGPU_IsNeuronPortVar(ctypes.c_int(i_node), c_var_name)!= 0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronPortVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_IsNeuronArrayVar = _nestgpu.NESTGPU_IsNeuronArrayVar NESTGPU_IsNeuronArrayVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsNeuronArrayVar.restype = ctypes.c_int + + def IsNeuronArrayVar(i_node, var_name): "Check name of neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) - ret = (NESTGPU_IsNeuronArrayVar(ctypes.c_int(i_node), c_var_name)!=0) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + ret = NESTGPU_IsNeuronArrayVar(ctypes.c_int(i_node), c_var_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -579,82 +709,84 @@ def IsNeuronArrayVar(i_node, var_name): NESTGPU_GetNeuronParamSize = _nestgpu.NESTGPU_GetNeuronParamSize NESTGPU_GetNeuronParamSize.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetNeuronParamSize.restype = ctypes.c_int + + def GetNeuronParamSize(i_node, param_name): "Get neuron parameter array size" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = NESTGPU_GetNeuronParamSize(ctypes.c_int(i_node), c_param_name) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_GetNeuronParamSize(ctypes.c_int(i_node), c_param_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronParam = _nestgpu.NESTGPU_GetNeuronParam -NESTGPU_GetNeuronParam.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronParam.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p) NESTGPU_GetNeuronParam.restype = c_float_p + + def GetNeuronParam(i_node, n_node, param_name): "Get neuron parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - data_pt = NESTGPU_GetNeuronParam(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_param_name) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + data_pt = NESTGPU_GetNeuronParam(ctypes.c_int(i_node), ctypes.c_int(n_node), c_param_name) array_size = GetNeuronParamSize(i_node, param_name) data_list = [] for i_node 
in range(n_node): - if (array_size>1): + if array_size > 1: row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) else: row_list = data_pt[i_node] data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronPtParam = _nestgpu.NESTGPU_GetNeuronPtParam -NESTGPU_GetNeuronPtParam.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronPtParam.argtypes = (ctypes.c_void_p, ctypes.c_int, c_char_p) NESTGPU_GetNeuronPtParam.restype = c_float_p + + def GetNeuronPtParam(nodes, param_name): "Get neuron list scalar parameter value" n_node = len(nodes) - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - data_pt = NESTGPU_GetNeuronPtParam(node_pt, - ctypes.c_int(n_node), c_param_name) + data_pt = NESTGPU_GetNeuronPtParam(node_pt, ctypes.c_int(n_node), c_param_name) array_size = GetNeuronParamSize(nodes[0], param_name) data_list = [] for i_node in range(n_node): - if (array_size>1): + if array_size > 1: row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) else: row_list = data_pt[i_node] data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayParam = _nestgpu.NESTGPU_GetArrayParam NESTGPU_GetArrayParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetArrayParam.restype = c_float_p + + def GetArrayParam(i_node, n_node, param_name): "Get neuron array parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) data_list = [] for j_node in range(n_node): i_node1 = i_node + j_node @@ -664,17 +796,17 @@ def GetArrayParam(i_node, n_node, param_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetNeuronListArrayParam(node_list, param_name): "Get neuron array parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) data_list = [] for i_node in node_list: row_list = [] @@ -683,9 +815,9 @@ def GetNeuronListArrayParam(node_list, param_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -694,24 +826,26 @@ def GetNeuronListArrayParam(node_list, param_name): NESTGPU_GetNeuronGroupParam = _nestgpu.NESTGPU_GetNeuronGroupParam NESTGPU_GetNeuronGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetNeuronGroupParam.restype = ctypes.c_float + + def GetNeuronGroupParam(i_node, param_name): "Check name of neuron group parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_GetNeuronGroupParam(ctypes.c_int(i_node), c_param_name) if GetErrorCode() != 0: raise 
ValueError(GetErrorMessage()) return ret - -#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx NESTGPU_GetNeuronVarSize = _nestgpu.NESTGPU_GetNeuronVarSize NESTGPU_GetNeuronVarSize.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetNeuronVarSize.restype = ctypes.c_int + + def GetNeuronVarSize(i_node, var_name): "Get neuron variable array size" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) ret = NESTGPU_GetNeuronVarSize(ctypes.c_int(i_node), c_var_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -719,119 +853,118 @@ def GetNeuronVarSize(i_node, var_name): NESTGPU_GetNeuronIntVar = _nestgpu.NESTGPU_GetNeuronIntVar -NESTGPU_GetNeuronIntVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronIntVar.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p) NESTGPU_GetNeuronIntVar.restype = c_int_p + + def GetNeuronIntVar(i_node, n_node, var_name): "Get neuron integer variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - data_pt = NESTGPU_GetNeuronIntVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + data_pt = NESTGPU_GetNeuronIntVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name) data_list = [] for i_node in range(n_node): data_list.append([data_pt[i_node]]) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronVar = _nestgpu.NESTGPU_GetNeuronVar -NESTGPU_GetNeuronVar.argtypes = (ctypes.c_int, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronVar.argtypes = (ctypes.c_int, ctypes.c_int, c_char_p) NESTGPU_GetNeuronVar.restype = c_float_p + + def GetNeuronVar(i_node, n_node, var_name): "Get neuron variable value" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - data_pt = NESTGPU_GetNeuronVar(ctypes.c_int(i_node), - ctypes.c_int(n_node), c_var_name) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + data_pt = NESTGPU_GetNeuronVar(ctypes.c_int(i_node), ctypes.c_int(n_node), c_var_name) array_size = GetNeuronVarSize(i_node, var_name) data_list = [] for i_node in range(n_node): - if (array_size>1): + if array_size > 1: row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) else: row_list = data_pt[i_node] data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_GetNeuronPtIntVar = _nestgpu.NESTGPU_GetNeuronPtIntVar -NESTGPU_GetNeuronPtIntVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronPtIntVar.argtypes = (ctypes.c_void_p, ctypes.c_int, c_char_p) NESTGPU_GetNeuronPtIntVar.restype = c_int_p + + def GetNeuronPtIntVar(nodes, var_name): "Get neuron list integer variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - data_pt = NESTGPU_GetNeuronPtIntVar(node_pt, - ctypes.c_int(n_node), c_var_name) + data_pt = NESTGPU_GetNeuronPtIntVar(node_pt, ctypes.c_int(n_node), 
c_var_name) data_list = [] for i_node in range(n_node): data_list.append([data_pt[i_node]]) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetNeuronPtVar = _nestgpu.NESTGPU_GetNeuronPtVar -NESTGPU_GetNeuronPtVar.argtypes = (ctypes.c_void_p, ctypes.c_int, - c_char_p) +NESTGPU_GetNeuronPtVar.argtypes = (ctypes.c_void_p, ctypes.c_int, c_char_p) NESTGPU_GetNeuronPtVar.restype = c_float_p + + def GetNeuronPtVar(nodes, var_name): "Get neuron list scalar variable value" n_node = len(nodes) - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) node_arr = (ctypes.c_int * len(nodes))(*nodes) node_pt = ctypes.cast(node_arr, ctypes.c_void_p) - data_pt = NESTGPU_GetNeuronPtVar(node_pt, - ctypes.c_int(n_node), c_var_name) + data_pt = NESTGPU_GetNeuronPtVar(node_pt, ctypes.c_int(n_node), c_var_name) array_size = GetNeuronVarSize(nodes[0], var_name) data_list = [] for i_node in range(n_node): - if (array_size>1): + if array_size > 1: row_list = [] for i in range(array_size): - row_list.append(data_pt[i_node*array_size + i]) + row_list.append(data_pt[i_node * array_size + i]) else: row_list = data_pt[i_node] data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayVar = _nestgpu.NESTGPU_GetArrayVar NESTGPU_GetArrayVar.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetArrayVar.restype = c_float_p + + def GetArrayVar(i_node, n_node, var_name): "Get neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) data_list = [] for j_node in range(n_node): i_node1 = i_node + j_node @@ -841,9 +974,9 @@ def GetArrayVar(i_node, n_node, var_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -851,8 +984,7 @@ def GetArrayVar(i_node, n_node, var_name): def GetNeuronListArrayVar(node_list, var_name): "Get neuron array variable" - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) data_list = [] for i_node in node_list: row_list = [] @@ -861,47 +993,43 @@ def GetNeuronListArrayVar(node_list, var_name): for i in range(array_size): row_list.append(data_pt[i]) data_list.append(row_list) - + ret = data_list - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetNeuronStatus(nodes, var_name): "Get neuron group scalar or array variable or parameter" - if (type(nodes)!=list) & (type(nodes)!=tuple) & (type(nodes)!=NodeSeq): + if (type(nodes) != list) & (type(nodes) != tuple) & (type(nodes) != NodeSeq): raise ValueError("Unknown node type") - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - if type(nodes)==NodeSeq: - if (IsNeuronScalParam(nodes.i0, var_name) | - IsNeuronPortParam(nodes.i0, var_name)): + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + if type(nodes) == NodeSeq: + if IsNeuronScalParam(nodes.i0, var_name) | IsNeuronPortParam(nodes.i0, var_name): ret = GetNeuronParam(nodes.i0, nodes.n, var_name) elif IsNeuronArrayParam(nodes.i0, var_name): ret = GetArrayParam(nodes.i0, nodes.n, var_name) - 
elif (IsNeuronIntVar(nodes.i0, var_name)): + elif IsNeuronIntVar(nodes.i0, var_name): ret = GetNeuronIntVar(nodes.i0, nodes.n, var_name) - elif (IsNeuronScalVar(nodes.i0, var_name) | - IsNeuronPortVar(nodes.i0, var_name)): + elif IsNeuronScalVar(nodes.i0, var_name) | IsNeuronPortVar(nodes.i0, var_name): ret = GetNeuronVar(nodes.i0, nodes.n, var_name) elif IsNeuronArrayVar(nodes.i0, var_name): ret = GetArrayVar(nodes.i0, nodes.n, var_name) elif IsNeuronGroupParam(nodes.i0, var_name): ret = GetNeuronStatus(nodes.ToList(), var_name) - + else: raise ValueError("Unknown neuron variable or parameter") else: - if (IsNeuronScalParam(nodes[0], var_name) | - IsNeuronPortParam(nodes[0], var_name)): + if IsNeuronScalParam(nodes[0], var_name) | IsNeuronPortParam(nodes[0], var_name): ret = GetNeuronPtParam(nodes, var_name) elif IsNeuronArrayParam(nodes[0], var_name): ret = GetNeuronListArrayParam(nodes, var_name) - elif (IsNeuronIntVar(nodes[0], var_name)): + elif IsNeuronIntVar(nodes[0], var_name): ret = GetNeuronPtIntVar(nodes, var_name) - elif (IsNeuronScalVar(nodes[0], var_name) | - IsNeuronPortVar(nodes[0], var_name)): + elif IsNeuronScalVar(nodes[0], var_name) | IsNeuronPortVar(nodes[0], var_name): ret = GetNeuronPtVar(nodes, var_name) elif IsNeuronArrayVar(nodes[0], var_name): ret = GetNeuronListArrayVar(nodes, var_name) @@ -917,6 +1045,8 @@ def GetNeuronStatus(nodes, var_name): NESTGPU_GetNIntVar = _nestgpu.NESTGPU_GetNIntVar NESTGPU_GetNIntVar.argtypes = (ctypes.c_int,) NESTGPU_GetNIntVar.restype = ctypes.c_int + + def GetNIntVar(i_node): "Get number of integer variables for a given node" ret = NESTGPU_GetNIntVar(ctypes.c_int(i_node)) @@ -924,9 +1054,12 @@ def GetNIntVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetNScalVar = _nestgpu.NESTGPU_GetNScalVar NESTGPU_GetNScalVar.argtypes = (ctypes.c_int,) NESTGPU_GetNScalVar.restype = ctypes.c_int + + def GetNScalVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNScalVar(ctypes.c_int(i_node)) @@ -934,14 +1067,16 @@ def GetNScalVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetIntVarNames = _nestgpu.NESTGPU_GetIntVarNames NESTGPU_GetIntVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetIntVarNames.restype = ctypes.POINTER(c_char_p) + + def GetIntVarNames(i_node): "Get list of scalar variable names" n_var = GetNIntVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetIntVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetIntVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] @@ -951,14 +1086,16 @@ def GetIntVarNames(i_node): raise ValueError(GetErrorMessage()) return var_name_list + NESTGPU_GetScalVarNames = _nestgpu.NESTGPU_GetScalVarNames NESTGPU_GetScalVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetScalVarNames.restype = ctypes.POINTER(c_char_p) + + def GetScalVarNames(i_node): "Get list of scalar variable names" n_var = GetNScalVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetScalVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetScalVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] @@ -968,9 +1105,12 @@ def GetScalVarNames(i_node): raise ValueError(GetErrorMessage()) return var_name_list + NESTGPU_GetNPortVar = _nestgpu.NESTGPU_GetNPortVar NESTGPU_GetNPortVar.argtypes = (ctypes.c_int,) NESTGPU_GetNPortVar.restype = ctypes.c_int + + 
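# A minimal sketch of how the getters shown above are usually reached. GetNeuronStatus
# checks whether the requested name is a parameter or a variable and whether the nodes are
# given as a NodeSeq or as an explicit index list, then forwards to the matching low-level
# getter. "V_m" and "tau_m" are placeholder names assumed to exist on the chosen model.

v_m_values = GetNeuronStatus([0, 1, 2], "V_m")   # one value (or row) per listed node
tau_values = GetNeuronParam(0, 10, "tau_m")      # contiguous block: first index + count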
def GetNPortVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNPortVar(ctypes.c_int(i_node)) @@ -978,20 +1118,22 @@ def GetNPortVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetPortVarNames = _nestgpu.NESTGPU_GetPortVarNames NESTGPU_GetPortVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetPortVarNames.restype = ctypes.POINTER(c_char_p) + + def GetPortVarNames(i_node): "Get list of scalar variable names" n_var = GetNPortVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetPortVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetPortVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] var_name = ctypes.cast(var_name_p, ctypes.c_char_p).value var_name_list.append(to_def_str(var_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return var_name_list @@ -1000,6 +1142,8 @@ def GetPortVarNames(i_node): NESTGPU_GetNScalParam = _nestgpu.NESTGPU_GetNScalParam NESTGPU_GetNScalParam.argtypes = (ctypes.c_int,) NESTGPU_GetNScalParam.restype = ctypes.c_int + + def GetNScalParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNScalParam(ctypes.c_int(i_node)) @@ -1007,27 +1151,32 @@ def GetNScalParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetScalParamNames = _nestgpu.NESTGPU_GetScalParamNames NESTGPU_GetScalParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetScalParamNames.restype = ctypes.POINTER(c_char_p) + + def GetScalParamNames(i_node): "Get list of scalar parameter names" n_param = GetNScalParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetScalParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetScalParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list + NESTGPU_GetNPortParam = _nestgpu.NESTGPU_GetNPortParam NESTGPU_GetNPortParam.argtypes = (ctypes.c_int,) NESTGPU_GetNPortParam.restype = ctypes.c_int + + def GetNPortParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNPortParam(ctypes.c_int(i_node)) @@ -1035,20 +1184,22 @@ def GetNPortParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetPortParamNames = _nestgpu.NESTGPU_GetPortParamNames NESTGPU_GetPortParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetPortParamNames.restype = ctypes.POINTER(c_char_p) + + def GetPortParamNames(i_node): "Get list of scalar parameter names" n_param = GetNPortParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetPortParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetPortParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -1057,6 +1208,8 @@ def GetPortParamNames(i_node): NESTGPU_GetNArrayParam = _nestgpu.NESTGPU_GetNArrayParam NESTGPU_GetNArrayParam.argtypes = (ctypes.c_int,) NESTGPU_GetNArrayParam.restype = ctypes.c_int + + def GetNArrayParam(i_node): "Get number of 
scalar parameters for a given node" ret = NESTGPU_GetNArrayParam(ctypes.c_int(i_node)) @@ -1064,20 +1217,22 @@ def GetNArrayParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayParamNames = _nestgpu.NESTGPU_GetArrayParamNames NESTGPU_GetArrayParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetArrayParamNames.restype = ctypes.POINTER(c_char_p) + + def GetArrayParamNames(i_node): "Get list of scalar parameter names" n_param = GetNArrayParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetArrayParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetArrayParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -1086,6 +1241,8 @@ def GetArrayParamNames(i_node): NESTGPU_GetNGroupParam = _nestgpu.NESTGPU_GetNGroupParam NESTGPU_GetNGroupParam.argtypes = (ctypes.c_int,) NESTGPU_GetNGroupParam.restype = ctypes.c_int + + def GetNGroupParam(i_node): "Get number of scalar parameters for a given node" ret = NESTGPU_GetNGroupParam(ctypes.c_int(i_node)) @@ -1093,27 +1250,32 @@ def GetNGroupParam(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetGroupParamNames = _nestgpu.NESTGPU_GetGroupParamNames NESTGPU_GetGroupParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetGroupParamNames.restype = ctypes.POINTER(c_char_p) + + def GetGroupParamNames(i_node): "Get list of scalar parameter names" n_param = GetNGroupParam(i_node) - param_name_pp = ctypes.cast(NESTGPU_GetGroupParamNames( - ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetGroupParamNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list + NESTGPU_GetNArrayVar = _nestgpu.NESTGPU_GetNArrayVar NESTGPU_GetNArrayVar.argtypes = (ctypes.c_int,) NESTGPU_GetNArrayVar.restype = ctypes.c_int + + def GetNArrayVar(i_node): "Get number of scalar variables for a given node" ret = NESTGPU_GetNArrayVar(ctypes.c_int(i_node)) @@ -1121,69 +1283,64 @@ def GetNArrayVar(i_node): raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetArrayVarNames = _nestgpu.NESTGPU_GetArrayVarNames NESTGPU_GetArrayVarNames.argtypes = (ctypes.c_int,) NESTGPU_GetArrayVarNames.restype = ctypes.POINTER(c_char_p) + + def GetArrayVarNames(i_node): "Get list of scalar variable names" n_var = GetNArrayVar(i_node) - var_name_pp = ctypes.cast(NESTGPU_GetArrayVarNames(ctypes.c_int(i_node)), - ctypes.POINTER(c_char_p)) + var_name_pp = ctypes.cast(NESTGPU_GetArrayVarNames(ctypes.c_int(i_node)), ctypes.POINTER(c_char_p)) var_name_list = [] for i in range(n_var): var_name_p = var_name_pp[i] var_name = ctypes.cast(var_name_p, ctypes.c_char_p).value var_name_list.append(to_def_str(var_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return var_name_list - - def SetNeuronStatus(nodes, var_name, val): "Set neuron group scalar or array variable or parameter" - if (type(nodes)!=list) & (type(nodes)!=tuple) & (type(nodes)!=NodeSeq): + if (type(nodes) != list) & (type(nodes) != tuple) & (type(nodes) != NodeSeq): raise 
ValueError("Unknown node type") - if (type(val)==dict): + if type(val) == dict: array_size = len(nodes) arr = DictToArray(val, array_size) for i in range(array_size): SetNeuronStatus([nodes[i]], var_name, arr[i]) return - - c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), - len(var_name)+1) - if type(nodes)==NodeSeq: + + c_var_name = ctypes.create_string_buffer(to_byte_str(var_name), len(var_name) + 1) + if type(nodes) == NodeSeq: if IsNeuronGroupParam(nodes.i0, var_name): SetNeuronGroupParam(nodes, var_name, val) elif IsNeuronScalParam(nodes.i0, var_name): SetNeuronScalParam(nodes.i0, nodes.n, var_name, val) - elif (IsNeuronPortParam(nodes.i0, var_name) | - IsNeuronArrayParam(nodes.i0, var_name)): + elif IsNeuronPortParam(nodes.i0, var_name) | IsNeuronArrayParam(nodes.i0, var_name): SetNeuronArrayParam(nodes.i0, nodes.n, var_name, val) elif IsNeuronIntVar(nodes.i0, var_name): SetNeuronIntVar(nodes.i0, nodes.n, var_name, val) elif IsNeuronScalVar(nodes.i0, var_name): SetNeuronScalVar(nodes.i0, nodes.n, var_name, val) - elif (IsNeuronPortVar(nodes.i0, var_name) | - IsNeuronArrayVar(nodes.i0, var_name)): + elif IsNeuronPortVar(nodes.i0, var_name) | IsNeuronArrayVar(nodes.i0, var_name): SetNeuronArrayVar(nodes.i0, nodes.n, var_name, val) else: raise ValueError("Unknown neuron variable or parameter") - else: + else: if IsNeuronScalParam(nodes[0], var_name): SetNeuronPtScalParam(nodes, var_name, val) - elif (IsNeuronPortParam(nodes[0], var_name) | - IsNeuronArrayParam(nodes[0], var_name)): + elif IsNeuronPortParam(nodes[0], var_name) | IsNeuronArrayParam(nodes[0], var_name): SetNeuronPtArrayParam(nodes, var_name, val) elif IsNeuronIntVar(nodes[0], var_name): SetNeuronPtIntVar(nodes, var_name, val) elif IsNeuronScalVar(nodes[0], var_name): SetNeuronPtScalVar(nodes, var_name, val) - elif (IsNeuronPortVar(nodes[0], var_name) | - IsNeuronArrayVar(nodes[0], var_name)): + elif IsNeuronPortVar(nodes[0], var_name) | IsNeuronArrayVar(nodes[0], var_name): SetNeuronPtArrayVar(nodes, var_name, val) else: raise ValueError("Unknown neuron variable or parameter") @@ -1191,6 +1348,8 @@ def SetNeuronStatus(nodes, var_name, val): NESTGPU_Calibrate = _nestgpu.NESTGPU_Calibrate NESTGPU_Calibrate.restype = ctypes.c_int + + def Calibrate(): "Calibrate simulation" ret = NESTGPU_Calibrate() @@ -1201,6 +1360,8 @@ def Calibrate(): NESTGPU_Simulate = _nestgpu.NESTGPU_Simulate NESTGPU_Simulate.restype = ctypes.c_int + + def Simulate(sim_time=1000.0): "Simulate neural activity" SetSimTime(sim_time) @@ -1213,16 +1374,17 @@ def Simulate(sim_time=1000.0): NESTGPU_ConnectMpiInit = _nestgpu.NESTGPU_ConnectMpiInit NESTGPU_ConnectMpiInit.argtypes = (ctypes.c_int, ctypes.POINTER(c_char_p)) NESTGPU_ConnectMpiInit.restype = ctypes.c_int + + def ConnectMpiInit(): "Initialize MPI connections" - argc=len(sys.argv) + argc = len(sys.argv) array_char_pt_type = c_char_p * argc - c_var_name_list=[] + c_var_name_list = [] for i in range(argc): c_arg = ctypes.create_string_buffer(to_byte_str(sys.argv[i]), 100) - c_var_name_list.append(c_arg) - ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), - array_char_pt_type(*c_var_name_list)) + c_var_name_list.append(c_arg) + ret = NESTGPU_ConnectMpiInit(ctypes.c_int(argc), array_char_pt_type(*c_var_name_list)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1230,6 +1392,8 @@ def ConnectMpiInit(): NESTGPU_MpiId = _nestgpu.NESTGPU_MpiId NESTGPU_MpiId.restype = ctypes.c_int + + def MpiId(): "Get MPI Id" ret = NESTGPU_MpiId() @@ -1237,12 +1401,16 @@ def MpiId(): 
raise ValueError(GetErrorMessage()) return ret + def Rank(): "Get MPI rank" return MpiId() + NESTGPU_MpiNp = _nestgpu.NESTGPU_MpiNp NESTGPU_MpiNp.restype = ctypes.c_int + + def MpiNp(): "Get MPI Np" ret = NESTGPU_MpiNp() @@ -1253,6 +1421,8 @@ def MpiNp(): NESTGPU_ProcMaster = _nestgpu.NESTGPU_ProcMaster NESTGPU_ProcMaster.restype = ctypes.c_int + + def ProcMaster(): "Get MPI ProcMaster" ret = NESTGPU_ProcMaster() @@ -1263,6 +1433,8 @@ def ProcMaster(): NESTGPU_MpiFinalize = _nestgpu.NESTGPU_MpiFinalize NESTGPU_MpiFinalize.restype = ctypes.c_int + + def MpiFinalize(): "Finalize MPI" ret = NESTGPU_MpiFinalize() @@ -1274,6 +1446,8 @@ def MpiFinalize(): NESTGPU_RandomInt = _nestgpu.NESTGPU_RandomInt NESTGPU_RandomInt.argtypes = (ctypes.c_size_t,) NESTGPU_RandomInt.restype = ctypes.POINTER(ctypes.c_uint) + + def RandomInt(n): "Generate n random integers in CUDA memory" ret = NESTGPU_RandomInt(ctypes.c_size_t(n)) @@ -1285,6 +1459,8 @@ def RandomInt(n): NESTGPU_RandomUniform = _nestgpu.NESTGPU_RandomUniform NESTGPU_RandomUniform.argtypes = (ctypes.c_size_t,) NESTGPU_RandomUniform.restype = c_float_p + + def RandomUniform(n): "Generate n random floats with uniform distribution in (0,1) in CUDA memory" ret = NESTGPU_RandomUniform(ctypes.c_size_t(n)) @@ -1296,41 +1472,63 @@ def RandomUniform(n): NESTGPU_RandomNormal = _nestgpu.NESTGPU_RandomNormal NESTGPU_RandomNormal.argtypes = (ctypes.c_size_t, ctypes.c_float, ctypes.c_float) NESTGPU_RandomNormal.restype = c_float_p + + def RandomNormal(n, mean, stddev): "Generate n random floats with normal distribution in CUDA memory" - ret = NESTGPU_RandomNormal(ctypes.c_size_t(n), ctypes.c_float(mean), - ctypes.c_float(stddev)) + ret = NESTGPU_RandomNormal(ctypes.c_size_t(n), ctypes.c_float(mean), ctypes.c_float(stddev)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_RandomNormalClipped = _nestgpu.NESTGPU_RandomNormalClipped -NESTGPU_RandomNormalClipped.argtypes = (ctypes.c_size_t, ctypes.c_float, ctypes.c_float, ctypes.c_float, - ctypes.c_float, ctypes.c_float) +NESTGPU_RandomNormalClipped.argtypes = ( + ctypes.c_size_t, + ctypes.c_float, + ctypes.c_float, + ctypes.c_float, + ctypes.c_float, + ctypes.c_float, +) NESTGPU_RandomNormalClipped.restype = c_float_p + + def RandomNormalClipped(n, mean, stddev, vmin, vmax, vstep=0): "Generate n random floats with normal clipped distribution in CUDA memory" - ret = NESTGPU_RandomNormalClipped(ctypes.c_size_t(n), - ctypes.c_float(mean), - ctypes.c_float(stddev), - ctypes.c_float(vmin), - ctypes.c_float(vmax), - ctypes.c_float(vstep)) + ret = NESTGPU_RandomNormalClipped( + ctypes.c_size_t(n), + ctypes.c_float(mean), + ctypes.c_float(stddev), + ctypes.c_float(vmin), + ctypes.c_float(vmax), + ctypes.c_float(vstep), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret NESTGPU_Connect = _nestgpu.NESTGPU_Connect -NESTGPU_Connect.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_ubyte, ctypes.c_float, ctypes.c_float) +NESTGPU_Connect.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_ubyte, + ctypes.c_float, + ctypes.c_float, +) NESTGPU_Connect.restype = ctypes.c_int + + def SingleConnect(i_source_node, i_target_node, i_port, weight, delay): "Connect two nodes" - ret = NESTGPU_Connect(ctypes.c_int(i_source_node), - ctypes.c_int(i_target_node), - ctypes.c_ubyte(i_port), ctypes.c_float(weight), - ctypes.c_float(delay)) + ret = NESTGPU_Connect( + ctypes.c_int(i_source_node), + ctypes.c_int(i_target_node), + ctypes.c_ubyte(i_port), + ctypes.c_float(weight), + 
ctypes.c_float(delay), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1338,6 +1536,8 @@ def SingleConnect(i_source_node, i_target_node, i_port, weight, delay): NESTGPU_ConnSpecInit = _nestgpu.NESTGPU_ConnSpecInit NESTGPU_ConnSpecInit.restype = ctypes.c_int + + def ConnSpecInit(): "Initialize connection rules specification" ret = NESTGPU_ConnSpecInit() @@ -1349,9 +1549,11 @@ def ConnSpecInit(): NESTGPU_SetConnSpecParam = _nestgpu.NESTGPU_SetConnSpecParam NESTGPU_SetConnSpecParam.argtypes = (c_char_p, ctypes.c_int) NESTGPU_SetConnSpecParam.restype = ctypes.c_int + + def SetConnSpecParam(param_name, val): "Set connection parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetConnSpecParam(c_param_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -1361,10 +1563,12 @@ def SetConnSpecParam(param_name, val): NESTGPU_ConnSpecIsParam = _nestgpu.NESTGPU_ConnSpecIsParam NESTGPU_ConnSpecIsParam.argtypes = (c_char_p,) NESTGPU_ConnSpecIsParam.restype = ctypes.c_int + + def ConnSpecIsParam(param_name): "Check name of connection parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_ConnSpecIsParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_ConnSpecIsParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1372,6 +1576,8 @@ def ConnSpecIsParam(param_name): NESTGPU_SynSpecInit = _nestgpu.NESTGPU_SynSpecInit NESTGPU_SynSpecInit.restype = ctypes.c_int + + def SynSpecInit(): "Initializa synapse specification" ret = NESTGPU_SynSpecInit() @@ -1379,35 +1585,44 @@ def SynSpecInit(): raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecIntParam = _nestgpu.NESTGPU_SetSynSpecIntParam NESTGPU_SetSynSpecIntParam.argtypes = (c_char_p, ctypes.c_int) NESTGPU_SetSynSpecIntParam.restype = ctypes.c_int + + def SetSynSpecIntParam(param_name, val): "Set synapse int parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetSynSpecIntParam(c_param_name, ctypes.c_int(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecFloatParam = _nestgpu.NESTGPU_SetSynSpecFloatParam NESTGPU_SetSynSpecFloatParam.argtypes = (c_char_p, ctypes.c_float) NESTGPU_SetSynSpecFloatParam.restype = ctypes.c_int + + def SetSynSpecFloatParam(param_name, val): "Set synapse float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetSynSpecFloatParam(c_param_name, ctypes.c_float(val)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_SetSynSpecFloatPtParam = _nestgpu.NESTGPU_SetSynSpecFloatPtParam NESTGPU_SetSynSpecFloatPtParam.argtypes = (c_char_p, ctypes.c_void_p) NESTGPU_SetSynSpecFloatPtParam.restype = ctypes.c_int + + def SetSynSpecFloatPtParam(param_name, arr): "Set synapse pointer to float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - if (type(arr) is list) | (type(arr) is tuple): + c_param_name = 
ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + if (type(arr) is list) | (type(arr) is tuple): arr = (ctypes.c_float * len(arr))(*arr) arr_pt = ctypes.cast(arr, ctypes.c_void_p) ret = NESTGPU_SetSynSpecFloatPtParam(c_param_name, arr_pt) @@ -1419,10 +1634,12 @@ def SetSynSpecFloatPtParam(param_name, arr): NESTGPU_SynSpecIsIntParam = _nestgpu.NESTGPU_SynSpecIsIntParam NESTGPU_SynSpecIsIntParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsIntParam.restype = ctypes.c_int + + def SynSpecIsIntParam(param_name): "Check name of synapse int parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsIntParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsIntParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1431,10 +1648,12 @@ def SynSpecIsIntParam(param_name): NESTGPU_SynSpecIsFloatParam = _nestgpu.NESTGPU_SynSpecIsFloatParam NESTGPU_SynSpecIsFloatParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsFloatParam.restype = ctypes.c_int + + def SynSpecIsFloatParam(param_name): "Check name of synapse float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsFloatParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsFloatParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1443,10 +1662,12 @@ def SynSpecIsFloatParam(param_name): NESTGPU_SynSpecIsFloatPtParam = _nestgpu.NESTGPU_SynSpecIsFloatPtParam NESTGPU_SynSpecIsFloatPtParam.argtypes = (c_char_p,) NESTGPU_SynSpecIsFloatPtParam.restype = ctypes.c_int + + def SynSpecIsFloatPtParam(param_name): "Check name of synapse pointer to float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name)+1) - ret = (NESTGPU_SynSpecIsFloatPtParam(c_param_name) != 0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SynSpecIsFloatPtParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1460,54 +1681,54 @@ def DictToArray(param_dict, array_size): mu = None sigma = None vstep = 0 - + for param_name in param_dict: pval = param_dict[param_name] - if param_name=="array": + if param_name == "array": dist_name = "array" arr = pval - elif param_name=="distribution": + elif param_name == "distribution": dist_name = pval - elif param_name=="low": + elif param_name == "low": low = pval - elif param_name=="high": + elif param_name == "high": high = pval - elif param_name=="mu": + elif param_name == "mu": mu = pval - elif param_name=="sigma": + elif param_name == "sigma": sigma = pval - elif param_name=="step": + elif param_name == "step": vstep = pval else: raise ValueError("Unknown parameter name in dictionary") - if dist_name=="array": + if dist_name == "array": if (type(arr) is list) | (type(arr) is tuple): if len(arr) != array_size: raise ValueError("Wrong array size.") arr = (ctypes.c_float * len(arr))(*arr) - #array_pt = ctypes.cast(arr, ctypes.c_void_p) - #return array_pt + # array_pt = ctypes.cast(arr, ctypes.c_void_p) + # return array_pt return arr - elif dist_name=="normal": + elif dist_name == "normal": return RandomNormal(array_size, mu, sigma) - elif dist_name=="normal_clipped": + elif dist_name == 
"normal_clipped": return RandomNormalClipped(array_size, mu, sigma, low, high, vstep) else: raise ValueError("Unknown distribution") def RuleArraySize(conn_dict, source, target): - if conn_dict["rule"]=="one_to_one": + if conn_dict["rule"] == "one_to_one": array_size = len(source) - elif conn_dict["rule"]=="all_to_all": - array_size = len(source)*len(target) - elif conn_dict["rule"]=="fixed_total_number": + elif conn_dict["rule"] == "all_to_all": + array_size = len(source) * len(target) + elif conn_dict["rule"] == "fixed_total_number": array_size = conn_dict["total_num"] - elif conn_dict["rule"]=="fixed_indegree": - array_size = len(target)*conn_dict["indegree"] - elif conn_dict["rule"]=="fixed_outdegree": - array_size = len(source)*conn_dict["outdegree"] + elif conn_dict["rule"] == "fixed_indegree": + array_size = len(target) * conn_dict["indegree"] + elif conn_dict["rule"] == "fixed_outdegree": + array_size = len(source) * conn_dict["outdegree"] else: raise ValueError("Unknown number of connections for this rule") return array_size @@ -1515,49 +1736,65 @@ def RuleArraySize(conn_dict, source, target): def SetSynParamFromArray(param_name, par_dict, array_size): arr_param_name = param_name + "_array" - if (not SynSpecIsFloatPtParam(arr_param_name)): - raise ValueError("Synapse parameter cannot be set by" - " arrays or distributions") + if not SynSpecIsFloatPtParam(arr_param_name): + raise ValueError("Synapse parameter cannot be set by" " arrays or distributions") arr = DictToArray(par_dict, array_size) - + array_pt = ctypes.cast(arr, ctypes.c_void_p) SetSynSpecFloatPtParam(arr_param_name, array_pt) - + NESTGPU_ConnectSeqSeq = _nestgpu.NESTGPU_ConnectSeqSeq -NESTGPU_ConnectSeqSeq.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int, - ctypes.c_int) +NESTGPU_ConnectSeqSeq.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_ConnectSeqSeq.restype = ctypes.c_int NESTGPU_ConnectSeqGroup = _nestgpu.NESTGPU_ConnectSeqGroup -NESTGPU_ConnectSeqGroup.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_ConnectSeqGroup.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_ConnectSeqGroup.restype = ctypes.c_int NESTGPU_ConnectGroupSeq = _nestgpu.NESTGPU_ConnectGroupSeq -NESTGPU_ConnectGroupSeq.argtypes = (ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_ConnectGroupSeq.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_ConnectGroupSeq.restype = ctypes.c_int NESTGPU_ConnectGroupGroup = _nestgpu.NESTGPU_ConnectGroupGroup -NESTGPU_ConnectGroupGroup.argtypes = (ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_ConnectGroupGroup.argtypes = ( + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_ConnectGroupGroup.restype = ctypes.c_int -def Connect(source, target, conn_dict, syn_dict): + +def Connect(source, target, conn_dict, syn_dict): "Connect two node groups" - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") gc.disable() ConnSpecInit() SynSpecInit() for param_name in conn_dict: - if param_name=="rule": + if param_name == "rule": for i_rule in 
range(len(conn_rule_name)): - if conn_dict[param_name]==conn_rule_name[i_rule]: + if conn_dict[param_name] == conn_rule_name[i_rule]: break if i_rule < len(conn_rule_name): SetConnSpecParam(param_name, i_rule) @@ -1567,18 +1804,18 @@ def Connect(source, target, conn_dict, syn_dict): SetConnSpecParam(param_name, conn_dict[param_name]) else: raise ValueError("Unknown connection parameter") - + array_size = RuleArraySize(conn_dict, source, target) - + for param_name in syn_dict: if SynSpecIsIntParam(param_name): val = syn_dict[param_name] - if ((param_name=="synapse_group") & (type(val)==SynGroup)): + if (param_name == "synapse_group") & (type(val) == SynGroup): val = val.i_syn_group SetSynSpecIntParam(param_name, val) elif SynSpecIsFloatParam(param_name): fpar = syn_dict[param_name] - if (type(fpar)==dict): + if type(fpar) == dict: SetSynParamFromArray(param_name, fpar, array_size) else: SetSynSpecFloatParam(param_name, fpar) @@ -1587,24 +1824,21 @@ def Connect(source, target, conn_dict, syn_dict): SetSynSpecFloatPtParam(param_name, syn_dict[param_name]) else: raise ValueError("Unknown synapse parameter") - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : + if (type(source) == NodeSeq) & (type(target) == NodeSeq): ret = NESTGPU_ConnectSeqSeq(source.i0, source.n, target.i0, target.n) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - ret = NESTGPU_ConnectSeqGroup(source.i0, source.n, target_arr_pt, - len(target)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - ret = NESTGPU_ConnectGroupSeq(source_arr_pt, len(source), - target.i0, target.n) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + ret = NESTGPU_ConnectSeqGroup(source.i0, source.n, target_arr_pt, len(target)) + elif (type(source) != NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_ConnectGroupSeq(source_arr_pt, len(source), target.i0, target.n) else: - ret = NESTGPU_ConnectGroupGroup(source_arr_pt, len(source), - target_arr_pt, len(target)) + ret = NESTGPU_ConnectGroupGroup(source_arr_pt, len(source), target_arr_pt, len(target)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) gc.enable() @@ -1612,99 +1846,129 @@ def Connect(source, target, conn_dict, syn_dict): NESTGPU_RemoteConnectSeqSeq = _nestgpu.NESTGPU_RemoteConnectSeqSeq -NESTGPU_RemoteConnectSeqSeq.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_RemoteConnectSeqSeq.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_RemoteConnectSeqSeq.restype = ctypes.c_int NESTGPU_RemoteConnectSeqGroup = _nestgpu.NESTGPU_RemoteConnectSeqGroup -NESTGPU_RemoteConnectSeqGroup.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_RemoteConnectSeqGroup.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_RemoteConnectSeqGroup.restype 
= ctypes.c_int NESTGPU_RemoteConnectGroupSeq = _nestgpu.NESTGPU_RemoteConnectGroupSeq -NESTGPU_RemoteConnectGroupSeq.argtypes = (ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int) +NESTGPU_RemoteConnectGroupSeq.argtypes = ( + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, +) NESTGPU_RemoteConnectGroupSeq.restype = ctypes.c_int NESTGPU_RemoteConnectGroupGroup = _nestgpu.NESTGPU_RemoteConnectGroupGroup -NESTGPU_RemoteConnectGroupGroup.argtypes = (ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int) +NESTGPU_RemoteConnectGroupGroup.argtypes = ( + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_int, +) NESTGPU_RemoteConnectGroupGroup.restype = ctypes.c_int -def RemoteConnect(i_source_host, source, i_target_host, target, - conn_dict, syn_dict): + +def RemoteConnect(i_source_host, source, i_target_host, target, conn_dict, syn_dict): "Connect two node groups of differen mpi hosts" - if (type(i_source_host)!=int) | (type(i_target_host)!=int): + if (type(i_source_host) != int) | (type(i_target_host) != int): raise ValueError("Error in host index") - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") - + ConnSpecInit() SynSpecInit() for param_name in conn_dict: - if param_name=="rule": + if param_name == "rule": for i_rule in range(len(conn_rule_name)): - if conn_dict[param_name]==conn_rule_name[i_rule]: + if conn_dict[param_name] == conn_rule_name[i_rule]: break if i_rule < len(conn_rule_name): SetConnSpecParam(param_name, i_rule) else: raise ValueError("Unknown connection rule") - + elif ConnSpecIsParam(param_name): SetConnSpecParam(param_name, conn_dict[param_name]) else: raise ValueError("Unknown connection parameter") - - array_size = RuleArraySize(conn_dict, source, target) - + + array_size = RuleArraySize(conn_dict, source, target) + for param_name in syn_dict: if SynSpecIsIntParam(param_name): SetSynSpecIntParam(param_name, syn_dict[param_name]) elif SynSpecIsFloatParam(param_name): fpar = syn_dict[param_name] - if (type(fpar)==dict): + if type(fpar) == dict: SetSynParamFromArray(param_name, fpar, array_size) else: SetSynSpecFloatParam(param_name, fpar) - + elif SynSpecIsFloatPtParam(param_name): SetSynSpecFloatPtParam(param_name, syn_dict[param_name]) else: raise ValueError("Unknown synapse parameter") - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : - ret = NESTGPU_RemoteConnectSeqSeq(i_source_host, source.i0, source.n, - i_target_host, target.i0, target.n) + if (type(source) == NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_RemoteConnectSeqSeq(i_source_host, source.i0, source.n, i_target_host, target.i0, target.n) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - ret = NESTGPU_RemoteConnectSeqGroup(i_source_host, source.i0, - source.n, i_target_host, - target_arr_pt, 
len(target)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - ret = NESTGPU_RemoteConnectGroupSeq(i_source_host, source_arr_pt, - len(source), - i_target_host, target.i0, - target.n) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + ret = NESTGPU_RemoteConnectSeqGroup( + i_source_host, + source.i0, + source.n, + i_target_host, + target_arr_pt, + len(target), + ) + elif (type(source) != NodeSeq) & (type(target) == NodeSeq): + ret = NESTGPU_RemoteConnectGroupSeq( + i_source_host, + source_arr_pt, + len(source), + i_target_host, + target.i0, + target.n, + ) else: - ret = NESTGPU_RemoteConnectGroupGroup(i_source_host, - source_arr_pt, - len(source), - i_target_host, - target_arr_pt, - len(target)) + ret = NESTGPU_RemoteConnectGroupGroup( + i_source_host, + source_arr_pt, + len(source), + i_target_host, + target_arr_pt, + len(target), + ) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -1713,27 +1977,27 @@ def RemoteConnect(i_source_host, source, i_target_host, target, def SetStatus(gen_object, params, val=None): "Set neuron or synapse group parameters or variables using dictionaries" - if type(gen_object)==RemoteNodeSeq: - if gen_object.i_host==MpiId(): + if type(gen_object) == RemoteNodeSeq: + if gen_object.i_host == MpiId(): SetStatus(gen_object.node_seq, params, val) return - + gc.disable() - if type(gen_object)==SynGroup: + if type(gen_object) == SynGroup: ret = SetSynGroupStatus(gen_object, params, val) gc.enable() return ret - nodes = gen_object + nodes = gen_object if val != None: - SetNeuronStatus(nodes, params, val) - elif type(params)==dict: + SetNeuronStatus(nodes, params, val) + elif type(params) == dict: for param_name in params: SetNeuronStatus(nodes, param_name, params[param_name]) - elif (type(params)==list) | (type(params) is tuple): + elif (type(params) == list) | (type(params) is tuple): if len(params) != len(nodes): raise ValueError("List should have the same size as nodes") for param_dict in params: - if type(param_dict)!=dict: + if type(param_dict) != dict: raise ValueError("Type of list elements should be dict") for param_name in param_dict: SetNeuronStatus(nodes, param_name, param_dict[param_name]) @@ -1743,233 +2007,288 @@ def SetStatus(gen_object, params, val=None): raise ValueError(GetErrorMessage()) gc.enable() -#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx NESTGPU_GetSeqSeqConnections = _nestgpu.NESTGPU_GetSeqSeqConnections -NESTGPU_GetSeqSeqConnections.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetSeqSeqConnections.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetSeqSeqConnections.restype = c_int_p NESTGPU_GetSeqGroupConnections = _nestgpu.NESTGPU_GetSeqGroupConnections -NESTGPU_GetSeqGroupConnections.argtypes = (ctypes.c_int, ctypes.c_int, - c_void_p, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetSeqGroupConnections.argtypes = ( + ctypes.c_int, + ctypes.c_int, + c_void_p, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetSeqGroupConnections.restype = c_int_p NESTGPU_GetGroupSeqConnections = _nestgpu.NESTGPU_GetGroupSeqConnections 
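# A minimal end-to-end sketch of the high-level calls defined above, using explicit index
# lists so it stays self-contained. The indices, the connection rule values and the
# parameter names are placeholders, and the nodes are assumed to have been created
# beforehand. A synapse parameter given as a distribution dictionary is expanded through
# DictToArray (here via RandomNormalClipped, in CUDA memory) before the connections are built.

sources = list(range(0, 100))      # assumed source population
targets = list(range(100, 200))    # assumed target population

conn_dict = {"rule": "fixed_indegree", "indegree": 10}
syn_dict = {
    "weight": {"distribution": "normal_clipped", "mu": 0.1, "sigma": 0.02, "low": 0.0, "high": 1.0},
    "delay": 1.0,
}
Connect(sources, targets, conn_dict, syn_dict)

# SetStatus accepts a single value, a dict, or one dict per node.
SetStatus(targets, {"V_m": -65.0})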
-NESTGPU_GetGroupSeqConnections.argtypes = (c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetGroupSeqConnections.argtypes = ( + c_void_p, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetGroupSeqConnections.restype = c_int_p NESTGPU_GetGroupGroupConnections = _nestgpu.NESTGPU_GetGroupGroupConnections -NESTGPU_GetGroupGroupConnections.argtypes = (c_void_p, ctypes.c_int, - c_void_p, ctypes.c_int, - ctypes.c_int, c_int_p) +NESTGPU_GetGroupGroupConnections.argtypes = ( + c_void_p, + ctypes.c_int, + c_void_p, + ctypes.c_int, + ctypes.c_int, + c_int_p, +) NESTGPU_GetGroupGroupConnections.restype = c_int_p -def GetConnections(source=None, target=None, syn_group=-1): + +def GetConnections(source=None, target=None, syn_group=-1): "Get connections between two node groups" - if source==None: + if source == None: source = NodeSeq(None) - if target==None: + if target == None: target = NodeSeq(None) - if (type(source)==int): + if type(source) == int: source = [source] - if (type(target)==int): + if type(target) == int: target = [target] - if (type(source)!=list) & (type(source)!=tuple) & (type(source)!=NodeSeq): + if (type(source) != list) & (type(source) != tuple) & (type(source) != NodeSeq): raise ValueError("Unknown source type") - if (type(target)!=list) & (type(target)!=tuple) & (type(target)!=NodeSeq): + if (type(target) != list) & (type(target) != tuple) & (type(target) != NodeSeq): raise ValueError("Unknown target type") - + n_conn = ctypes.c_int(0) - if (type(source)==NodeSeq) & (type(target)==NodeSeq) : - conn_arr = NESTGPU_GetSeqSeqConnections(source.i0, source.n, - target.i0, target.n, - syn_group, - ctypes.byref(n_conn)) + if (type(source) == NodeSeq) & (type(target) == NodeSeq): + conn_arr = NESTGPU_GetSeqSeqConnections( + source.i0, source.n, target.i0, target.n, syn_group, ctypes.byref(n_conn) + ) else: - if type(source)!=NodeSeq: - source_arr = (ctypes.c_int * len(source))(*source) - source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) - if type(target)!=NodeSeq: - target_arr = (ctypes.c_int * len(target))(*target) - target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) - if (type(source)==NodeSeq) & (type(target)!=NodeSeq): - conn_arr = NESTGPU_GetSeqGroupConnections(source.i0, source.n, - target_arr_pt, - len(target), - syn_group, - ctypes.byref(n_conn)) - elif (type(source)!=NodeSeq) & (type(target)==NodeSeq): - conn_arr = NESTGPU_GetGroupSeqConnections(source_arr_pt, - len(source), - target.i0, target.n, - syn_group, - ctypes.byref(n_conn)) + if type(source) != NodeSeq: + source_arr = (ctypes.c_int * len(source))(*source) + source_arr_pt = ctypes.cast(source_arr, ctypes.c_void_p) + if type(target) != NodeSeq: + target_arr = (ctypes.c_int * len(target))(*target) + target_arr_pt = ctypes.cast(target_arr, ctypes.c_void_p) + if (type(source) == NodeSeq) & (type(target) != NodeSeq): + conn_arr = NESTGPU_GetSeqGroupConnections( + source.i0, + source.n, + target_arr_pt, + len(target), + syn_group, + ctypes.byref(n_conn), + ) + elif (type(source) != NodeSeq) & (type(target) == NodeSeq): + conn_arr = NESTGPU_GetGroupSeqConnections( + source_arr_pt, + len(source), + target.i0, + target.n, + syn_group, + ctypes.byref(n_conn), + ) else: - conn_arr = NESTGPU_GetGroupGroupConnections(source_arr_pt, - len(source), - target_arr_pt, - len(target), - syn_group, - ctypes.byref(n_conn)) + conn_arr = NESTGPU_GetGroupGroupConnections( + source_arr_pt, + len(source), + target_arr_pt, + len(target), + syn_group, + 
ctypes.byref(n_conn), + ) conn_list = [] for i_conn in range(n_conn.value): - conn_id = ConnectionId(conn_arr[i_conn*3], conn_arr[i_conn*3 + 1], - conn_arr[i_conn*3 + 2]) + conn_id = ConnectionId(conn_arr[i_conn * 3], conn_arr[i_conn * 3 + 1], conn_arr[i_conn * 3 + 2]) conn_list.append(conn_id) - + ret = conn_list if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetConnectionStatus = _nestgpu.NESTGPU_GetConnectionStatus -NESTGPU_GetConnectionStatus.argtypes = (ctypes.c_int, ctypes.c_int, - ctypes.c_int, c_int_p, - c_char_p, c_char_p, - c_float_p, c_float_p) +NESTGPU_GetConnectionStatus.argtypes = ( + ctypes.c_int, + ctypes.c_int, + ctypes.c_int, + c_int_p, + c_char_p, + c_char_p, + c_float_p, + c_float_p, +) NESTGPU_GetConnectionStatus.restype = ctypes.c_int + def GetConnectionStatus(conn_id): i_source = conn_id.i_source i_group = conn_id.i_group i_conn = conn_id.i_conn - + i_target = ctypes.c_int(0) i_port = ctypes.c_char() i_syn = ctypes.c_char() delay = ctypes.c_float(0.0) weight = ctypes.c_float(0.0) - NESTGPU_GetConnectionStatus(i_source, i_group, i_conn, - ctypes.byref(i_target), - ctypes.byref(i_port), - ctypes.byref(i_syn), - ctypes.byref(delay), - ctypes.byref(weight)) + NESTGPU_GetConnectionStatus( + i_source, + i_group, + i_conn, + ctypes.byref(i_target), + ctypes.byref(i_port), + ctypes.byref(i_syn), + ctypes.byref(delay), + ctypes.byref(weight), + ) i_target = i_target.value i_port = ord(i_port.value) i_syn = ord(i_syn.value) delay = delay.value weight = weight.value - conn_status_dict = {"source":i_source, "target":i_target, "port":i_port, - "syn":i_syn, "delay":delay, "weight":weight} + conn_status_dict = { + "source": i_source, + "target": i_target, + "port": i_port, + "syn": i_syn, + "delay": delay, + "weight": weight, + } return conn_status_dict def GetStatus(gen_object, var_key=None): "Get neuron group, connection or synapse group status" - if type(gen_object)==SynGroup: + if type(gen_object) == SynGroup: return GetSynGroupStatus(gen_object, var_key) - - if type(gen_object)==NodeSeq: + + if type(gen_object) == NodeSeq: gen_object = gen_object.ToList() - if (type(gen_object)==list) | (type(gen_object)==tuple): + if (type(gen_object) == list) | (type(gen_object) == tuple): status_list = [] for gen_elem in gen_object: elem_dict = GetStatus(gen_elem, var_key) status_list.append(elem_dict) return status_list - if (type(var_key)==list) | (type(var_key)==tuple): + if (type(var_key) == list) | (type(var_key) == tuple): status_list = [] for var_elem in var_key: var_value = GetStatus(gen_object, var_elem) status_list.append(var_value) return status_list - elif (var_key==None): - if (type(gen_object)==ConnectionId): + elif var_key == None: + if type(gen_object) == ConnectionId: status_dict = GetConnectionStatus(gen_object) - elif (type(gen_object)==int): + elif type(gen_object) == int: i_node = gen_object status_dict = {} - name_list = GetIntVarNames(i_node) \ - + GetScalVarNames(i_node) + GetScalParamNames(i_node) \ - + GetPortVarNames(i_node) + GetPortParamNames(i_node) \ - + GetArrayVarNames(i_node) \ - + GetArrayParamNames(i_node) \ - + GetGroupParamNames(i_node) + name_list = ( + GetIntVarNames(i_node) + + GetScalVarNames(i_node) + + GetScalParamNames(i_node) + + GetPortVarNames(i_node) + + GetPortParamNames(i_node) + + GetArrayVarNames(i_node) + + GetArrayParamNames(i_node) + + GetGroupParamNames(i_node) + ) for var_name in name_list: val = GetStatus(i_node, var_name) status_dict[var_name] = val else: raise ValueError("Unknown object type in 
GetStatus") return status_dict - elif (type(var_key)==str) | (type(var_key)==bytes): - if (type(gen_object)==ConnectionId): + elif (type(var_key) == str) | (type(var_key) == bytes): + if type(gen_object) == ConnectionId: status_dict = GetConnectionStatus(gen_object) return status_dict[var_key] - elif (type(gen_object)==int): + elif type(gen_object) == int: i_node = gen_object return GetNeuronStatus([i_node], var_key)[0] else: raise ValueError("Unknown object type in GetStatus") - + else: raise ValueError("Unknown key type in GetStatus", type(var_key)) - NESTGPU_CreateSynGroup = _nestgpu.NESTGPU_CreateSynGroup NESTGPU_CreateSynGroup.argtypes = (c_char_p,) NESTGPU_CreateSynGroup.restype = ctypes.c_int + + def CreateSynGroup(model_name, status_dict=None): "Create a synapse group" - if (type(status_dict)==dict): + if type(status_dict) == dict: syn_group = CreateSynGroup(model_name) SetStatus(syn_group, status_dict) return syn_group - elif status_dict!=None: + elif status_dict != None: raise ValueError("Wrong argument in CreateSynGroup") - c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), \ - len(model_name)+1) - i_syn_group = NESTGPU_CreateSynGroup(c_model_name) + c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name) + 1) + i_syn_group = NESTGPU_CreateSynGroup(c_model_name) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return SynGroup(i_syn_group) - + NESTGPU_GetSynGroupNParam = _nestgpu.NESTGPU_GetSynGroupNParam NESTGPU_GetSynGroupNParam.argtypes = (ctypes.c_int,) NESTGPU_GetSynGroupNParam.restype = ctypes.c_int + + def GetSynGroupNParam(syn_group): "Get number of synapse parameters for a given synapse group" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupNParam") i_syn_group = syn_group.i_syn_group - + ret = NESTGPU_GetSynGroupNParam(ctypes.c_int(i_syn_group)) if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetSynGroupParamNames = _nestgpu.NESTGPU_GetSynGroupParamNames NESTGPU_GetSynGroupParamNames.argtypes = (ctypes.c_int,) NESTGPU_GetSynGroupParamNames.restype = ctypes.POINTER(c_char_p) + + def GetSynGroupParamNames(syn_group): "Get list of synapse group parameter names" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupParamNames") i_syn_group = syn_group.i_syn_group n_param = GetSynGroupNParam(syn_group) - param_name_pp = ctypes.cast(NESTGPU_GetSynGroupParamNames( - ctypes.c_int(i_syn_group)), ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast( + NESTGPU_GetSynGroupParamNames(ctypes.c_int(i_syn_group)), + ctypes.POINTER(c_char_p), + ) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -1978,93 +2297,94 @@ def GetSynGroupParamNames(syn_group): NESTGPU_IsSynGroupParam = _nestgpu.NESTGPU_IsSynGroupParam NESTGPU_IsSynGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_IsSynGroupParam.restype = ctypes.c_int + + def IsSynGroupParam(syn_group, param_name): "Check name of synapse group parameter" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in IsSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - 
len(param_name)+1) - ret = (NESTGPU_IsSynGroupParam(ctypes.c_int(i_syn_group), \ - c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsSynGroupParam(ctypes.c_int(i_syn_group), c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetSynGroupParam = _nestgpu.NESTGPU_GetSynGroupParam NESTGPU_GetSynGroupParam.argtypes = (ctypes.c_int, c_char_p) NESTGPU_GetSynGroupParam.restype = ctypes.c_float + + def GetSynGroupParam(syn_group, param_name): "Get synapse group parameter value" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + + ret = NESTGPU_GetSynGroupParam(ctypes.c_int(i_syn_group), c_param_name) - ret = NESTGPU_GetSynGroupParam(ctypes.c_int(i_syn_group), - c_param_name) - if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_SetSynGroupParam = _nestgpu.NESTGPU_SetSynGroupParam -NESTGPU_SetSynGroupParam.argtypes = (ctypes.c_int, c_char_p, - ctypes.c_float) +NESTGPU_SetSynGroupParam.argtypes = (ctypes.c_int, c_char_p, ctypes.c_float) NESTGPU_SetSynGroupParam.restype = ctypes.c_int + + def SetSynGroupParam(syn_group, param_name, val): "Set synapse group parameter value" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in SetSynGroupParam") i_syn_group = syn_group.i_syn_group - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = NESTGPU_SetSynGroupParam(ctypes.c_int(i_syn_group), - c_param_name, ctypes.c_float(val)) - + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_SetSynGroupParam(ctypes.c_int(i_syn_group), c_param_name, ctypes.c_float(val)) + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetSynGroupStatus(syn_group, var_key=None): "Get synapse group status" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in GetSynGroupStatus") - if (type(var_key)==list) | (type(var_key)==tuple): + if (type(var_key) == list) | (type(var_key) == tuple): status_list = [] for var_elem in var_key: var_value = GetSynGroupStatus(syn_group, var_elem) status_list.append(var_value) return status_list - elif (var_key==None): + elif var_key == None: status_dict = {} name_list = GetSynGroupParamNames(syn_group) for param_name in name_list: val = GetSynGroupStatus(syn_group, param_name) status_dict[param_name] = val return status_dict - elif (type(var_key)==str) | (type(var_key)==bytes): - return GetSynGroupParam(syn_group, var_key) + elif (type(var_key) == str) | (type(var_key) == bytes): + return GetSynGroupParam(syn_group, var_key) else: raise ValueError("Unknown key type in GetSynGroupStatus", type(var_key)) + def SetSynGroupStatus(syn_group, params, val=None): "Set synapse group parameters using dictionaries" - if type(syn_group)!=SynGroup: + if type(syn_group) != SynGroup: raise ValueError("Wrong argument type in SetSynGroupStatus") - if ((type(params)==dict) & (val==None)): + if (type(params) == dict) & (val == None): for param_name in params: SetSynGroupStatus(syn_group, param_name, params[param_name]) - elif (type(params)==str): - 
return SetSynGroupParam(syn_group, params, val)
+    elif type(params) == str:
+        return SetSynGroupParam(syn_group, params, val)
     else:
-        raise ValueError("Wrong argument in SetSynGroupStatus")       
+        raise ValueError("Wrong argument in SetSynGroupStatus")
 
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
@@ -2072,13 +2392,14 @@ def SetSynGroupStatus(syn_group, params, val=None):
 
 NESTGPU_ActivateSpikeCount = _nestgpu.NESTGPU_ActivateSpikeCount
 NESTGPU_ActivateSpikeCount.argtypes = (ctypes.c_int, ctypes.c_int)
 NESTGPU_ActivateSpikeCount.restype = ctypes.c_int
+
+
 def ActivateSpikeCount(nodes):
     "Activate spike count for node group"
-    if type(nodes)!=NodeSeq:
+    if type(nodes) != NodeSeq:
         raise ValueError("Argument type of ActivateSpikeCount must be NodeSeq")
-    ret = NESTGPU_ActivateSpikeCount(ctypes.c_int(nodes.i0),
-                                     ctypes.c_int(nodes.n))
+    ret = NESTGPU_ActivateSpikeCount(ctypes.c_int(nodes.i0), ctypes.c_int(nodes.n))
 
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
@@ -2086,34 +2407,41 @@ def ActivateSpikeCount(nodes):
 
 NESTGPU_ActivateRecSpikeTimes = _nestgpu.NESTGPU_ActivateRecSpikeTimes
-NESTGPU_ActivateRecSpikeTimes.argtypes = (ctypes.c_int, ctypes.c_int, \
-                                          ctypes.c_int)
+NESTGPU_ActivateRecSpikeTimes.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int)
 NESTGPU_ActivateRecSpikeTimes.restype = ctypes.c_int
+
+
 def ActivateRecSpikeTimes(nodes, max_n_rec_spike_times):
     "Activate spike time recording for node group"
-    if type(nodes)!=NodeSeq:
+    if type(nodes) != NodeSeq:
         raise ValueError("Argument type of ActivateRecSpikeTimes must be NodeSeq")
-    ret = NESTGPU_ActivateRecSpikeTimes(ctypes.c_int(nodes.i0),
-                                        ctypes.c_int(nodes.n),
-                                        ctypes.c_int(max_n_rec_spike_times))
+    ret = NESTGPU_ActivateRecSpikeTimes(
+        ctypes.c_int(nodes.i0),
+        ctypes.c_int(nodes.n),
+        ctypes.c_int(max_n_rec_spike_times),
+    )
 
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
     return ret
 
+
 NESTGPU_SetRecSpikeTimesStep = _nestgpu.NESTGPU_SetRecSpikeTimesStep
-NESTGPU_SetRecSpikeTimesStep.argtypes = (ctypes.c_int, ctypes.c_int, \
-                                         ctypes.c_int)
+NESTGPU_SetRecSpikeTimesStep.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int)
 NESTGPU_SetRecSpikeTimesStep.restype = ctypes.c_int
+
+
 def SetRecSpikeTimesStep(nodes, rec_spike_times_step):
     "Set number of time steps for buffering spike time recording"
-    if type(nodes)!=NodeSeq:
+    if type(nodes) != NodeSeq:
         raise ValueError("Argument type of SetRecSpikeTimesStep must be NodeSeq")
-    ret = NESTGPU_SetRecSpikeTimesStep(ctypes.c_int(nodes.i0),
-                                       ctypes.c_int(nodes.n),
-                                       ctypes.c_int(rec_spike_times_step))
+    ret = NESTGPU_SetRecSpikeTimesStep(
+        ctypes.c_int(nodes.i0),
+        ctypes.c_int(nodes.n),
+        ctypes.c_int(rec_spike_times_step),
+    )
 
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
@@ -2123,6 +2451,8 @@ def SetRecSpikeTimesStep(nodes, rec_spike_times_step):
 
 NESTGPU_GetNRecSpikeTimes = _nestgpu.NESTGPU_GetNRecSpikeTimes
 NESTGPU_GetNRecSpikeTimes.argtypes = (ctypes.c_int,)
 NESTGPU_GetNRecSpikeTimes.restype = ctypes.c_int
+
+
 def GetNRecSpikeTimes(i_node):
     "Get number of recorded spike times for node"
 
@@ -2132,51 +2462,54 @@ def GetNRecSpikeTimes(i_node):
         raise ValueError(GetErrorMessage())
     return ret
 
+
 NESTGPU_GetRecSpikeTimes = _nestgpu.NESTGPU_GetRecSpikeTimes
 NESTGPU_GetRecSpikeTimes.argtypes = (ctypes.c_int, ctypes.c_int, c_int_pp, c_float_ppp)
 NESTGPU_GetRecSpikeTimes.restype = ctypes.c_int
 
+
 def GetRecSpikeTimes(nodes):
     "Get recorded spike times for node group"
-    if type(nodes)!=NodeSeq:
+    if type(nodes) != NodeSeq:
         raise ValueError("First 
argument type of GetRecSpikeTimes must be NodeSeq")
     n_spike_times = (c_int_p * 1)()
-    n_spike_times_pt = ctypes.cast(n_spike_times, c_int_pp)    
+    n_spike_times_pt = ctypes.cast(n_spike_times, c_int_pp)
     spike_times = (c_float_pp * 1)()
-    spike_times_pt = ctypes.cast(spike_times, c_float_ppp)    
+    spike_times_pt = ctypes.cast(spike_times, c_float_ppp)
 
-    spike_time_list = []
-    ret1 = NESTGPU_GetRecSpikeTimes(ctypes.c_int(nodes.i0), ctypes.c_int(nodes.n),
-                                   n_spike_times_pt, spike_times_pt)
+    spike_time_list = []
+    ret1 = NESTGPU_GetRecSpikeTimes(ctypes.c_int(nodes.i0), ctypes.c_int(nodes.n), n_spike_times_pt, spike_times_pt)
     for i_n in range(nodes.n):
         spike_time_list.append([])
         n_spike = n_spike_times_pt[0][i_n]
         for i_spike in range(n_spike):
             spike_time_list[i_n].append(spike_times_pt[0][i_n][i_spike])
-    
+
     ret = spike_time_list
-    
+
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
     return ret
 
 
 NESTGPU_SetNeuronGroupParam = _nestgpu.NESTGPU_SetNeuronGroupParam
-NESTGPU_SetNeuronGroupParam.argtypes = (ctypes.c_int, ctypes.c_int,
-                                        c_char_p, ctypes.c_float)
+NESTGPU_SetNeuronGroupParam.argtypes = (
+    ctypes.c_int,
+    ctypes.c_int,
+    c_char_p,
+    ctypes.c_float,
+)
 NESTGPU_SetNeuronGroupParam.restype = ctypes.c_int
+
+
 def SetNeuronGroupParam(nodes, param_name, val):
     "Set neuron group parameter value"
-    if type(nodes)!=NodeSeq:
+    if type(nodes) != NodeSeq:
         raise ValueError("Wrong argument type in SetNeuronGroupParam")
-    c_param_name = ctypes.create_string_buffer(to_byte_str(param_name),
-                                               len(param_name)+1)
-    ret = NESTGPU_SetNeuronGroupParam(ctypes.c_int(nodes.i0),
-                                      ctypes.c_int(nodes.n),
-                                      c_param_name, ctypes.c_float(val))
+    c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1)
+    ret = NESTGPU_SetNeuronGroupParam(ctypes.c_int(nodes.i0), ctypes.c_int(nodes.n), c_param_name, ctypes.c_float(val))
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
     return ret
@@ -2184,9 +2517,11 @@ def SetNeuronGroupParam(nodes, param_name, val):
 
 NESTGPU_GetNBoolParam = _nestgpu.NESTGPU_GetNBoolParam
 NESTGPU_GetNBoolParam.restype = ctypes.c_int
+
+
 def GetNBoolParam():
     "Get number of kernel boolean parameters"
-    
+
     ret = NESTGPU_GetNBoolParam()
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
@@ -2195,18 +2530,19 @@ def GetNBoolParam():
 
 NESTGPU_GetBoolParamNames = _nestgpu.NESTGPU_GetBoolParamNames
 NESTGPU_GetBoolParamNames.restype = ctypes.POINTER(c_char_p)
+
+
 def GetBoolParamNames():
     "Get list of kernel boolean parameter names"
     
     n_param = GetNBoolParam()
-    param_name_pp = ctypes.cast(NESTGPU_GetBoolParamNames(),
-                                ctypes.POINTER(c_char_p))
+    param_name_pp = ctypes.cast(NESTGPU_GetBoolParamNames(), ctypes.POINTER(c_char_p))
     param_name_list = []
     for i in range(n_param):
         param_name_p = param_name_pp[i]
         param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value
         param_name_list.append(to_def_str(param_name))
-        
+
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage())
     return param_name_list
@@ -2215,52 +2551,58 @@ def GetBoolParamNames():
 NESTGPU_IsBoolParam = _nestgpu.NESTGPU_IsBoolParam
 NESTGPU_IsBoolParam.argtypes = (c_char_p,)
 NESTGPU_IsBoolParam.restype = ctypes.c_int
+
+
 def IsBoolParam(param_name):
     "Check name of kernel boolean parameter"
-    c_param_name = ctypes.create_string_buffer(to_byte_str(param_name),
-                                               len(param_name)+1)
-    ret = (NESTGPU_IsBoolParam(c_param_name)!=0) 
+    c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1)
+    ret = NESTGPU_IsBoolParam(c_param_name) != 0
     if GetErrorCode() != 0:
         raise ValueError(GetErrorMessage()) 
return ret - + NESTGPU_GetBoolParam = _nestgpu.NESTGPU_GetBoolParam NESTGPU_GetBoolParam.argtypes = (c_char_p,) NESTGPU_GetBoolParam.restype = ctypes.c_bool + + def GetBoolParam(param_name): "Get kernel boolean parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_GetBoolParam(c_param_name) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_SetBoolParam = _nestgpu.NESTGPU_SetBoolParam NESTGPU_SetBoolParam.argtypes = (c_char_p, ctypes.c_bool) NESTGPU_SetBoolParam.restype = ctypes.c_int + + def SetBoolParam(param_name, val): "Set kernel boolean parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetBoolParam(c_param_name, ctypes.c_bool(val)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + NESTGPU_GetNFloatParam = _nestgpu.NESTGPU_GetNFloatParam NESTGPU_GetNFloatParam.restype = ctypes.c_int + + def GetNFloatParam(): "Get number of kernel float parameters" - + ret = NESTGPU_GetNFloatParam() if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -2269,18 +2611,19 @@ def GetNFloatParam(): NESTGPU_GetFloatParamNames = _nestgpu.NESTGPU_GetFloatParamNames NESTGPU_GetFloatParamNames.restype = ctypes.POINTER(c_char_p) + + def GetFloatParamNames(): "Get list of kernel float parameter names" n_param = GetNFloatParam() - param_name_pp = ctypes.cast(NESTGPU_GetFloatParamNames(), - ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetFloatParamNames(), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -2289,43 +2632,46 @@ def GetFloatParamNames(): NESTGPU_IsFloatParam = _nestgpu.NESTGPU_IsFloatParam NESTGPU_IsFloatParam.argtypes = (c_char_p,) NESTGPU_IsFloatParam.restype = ctypes.c_int + + def IsFloatParam(param_name): "Check name of kernel float parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsFloatParam(c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsFloatParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetFloatParam = _nestgpu.NESTGPU_GetFloatParam NESTGPU_GetFloatParam.argtypes = (c_char_p,) NESTGPU_GetFloatParam.restype = ctypes.c_float + + def GetFloatParam(param_name): "Get kernel float parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_GetFloatParam(c_param_name) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_SetFloatParam = _nestgpu.NESTGPU_SetFloatParam NESTGPU_SetFloatParam.argtypes = (c_char_p, ctypes.c_float) NESTGPU_SetFloatParam.restype = ctypes.c_int + + def SetFloatParam(param_name, val): "Set kernel float parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = 
ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetFloatParam(c_param_name, ctypes.c_float(val)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret @@ -2333,9 +2679,11 @@ def SetFloatParam(param_name, val): NESTGPU_GetNIntParam = _nestgpu.NESTGPU_GetNIntParam NESTGPU_GetNIntParam.restype = ctypes.c_int + + def GetNIntParam(): "Get number of kernel int parameters" - + ret = NESTGPU_GetNIntParam() if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) @@ -2344,18 +2692,19 @@ def GetNIntParam(): NESTGPU_GetIntParamNames = _nestgpu.NESTGPU_GetIntParamNames NESTGPU_GetIntParamNames.restype = ctypes.POINTER(c_char_p) + + def GetIntParamNames(): "Get list of kernel int parameter names" n_param = GetNIntParam() - param_name_pp = ctypes.cast(NESTGPU_GetIntParamNames(), - ctypes.POINTER(c_char_p)) + param_name_pp = ctypes.cast(NESTGPU_GetIntParamNames(), ctypes.POINTER(c_char_p)) param_name_list = [] for i in range(n_param): param_name_p = param_name_pp[i] param_name = ctypes.cast(param_name_p, ctypes.c_char_p).value param_name_list.append(to_def_str(param_name)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return param_name_list @@ -2364,65 +2713,69 @@ def GetIntParamNames(): NESTGPU_IsIntParam = _nestgpu.NESTGPU_IsIntParam NESTGPU_IsIntParam.argtypes = (c_char_p,) NESTGPU_IsIntParam.restype = ctypes.c_int + + def IsIntParam(param_name): "Check name of kernel int parameter" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) - ret = (NESTGPU_IsIntParam(c_param_name)!=0) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) + ret = NESTGPU_IsIntParam(c_param_name) != 0 if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_GetIntParam = _nestgpu.NESTGPU_GetIntParam NESTGPU_GetIntParam.argtypes = (c_char_p,) NESTGPU_GetIntParam.restype = ctypes.c_int + + def GetIntParam(param_name): "Get kernel int parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_GetIntParam(c_param_name) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret - + NESTGPU_SetIntParam = _nestgpu.NESTGPU_SetIntParam NESTGPU_SetIntParam.argtypes = (c_char_p, ctypes.c_int) NESTGPU_SetIntParam.restype = ctypes.c_int + + def SetIntParam(param_name, val): "Set kernel int parameter value" - c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), - len(param_name)+1) + c_param_name = ctypes.create_string_buffer(to_byte_str(param_name), len(param_name) + 1) ret = NESTGPU_SetIntParam(c_param_name, ctypes.c_int(val)) - + if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) return ret + def GetKernelStatus(var_key=None): "Get kernel status" - if (type(var_key)==list) | (type(var_key)==tuple): + if (type(var_key) == list) | (type(var_key) == tuple): status_list = [] for var_elem in var_key: var_value = GetKernelStatus(var_elem) status_list.append(var_value) return status_list - elif (var_key==None): + elif var_key == None: status_dict = {} name_list = GetFloatParamNames() + GetIntParamNames() + GetBoolParamNames() for param_name in name_list: val = GetKernelStatus(param_name) status_dict[param_name] = val return status_dict - elif (type(var_key)==str) | (type(var_key)==bytes): + elif (type(var_key) == str) | (type(var_key) == bytes): if IsFloatParam(var_key): - 
return GetFloatParam(var_key) + return GetFloatParam(var_key) elif IsIntParam(var_key): return GetIntParam(var_key) elif IsBoolParam(var_key): @@ -2432,14 +2785,15 @@ def GetKernelStatus(var_key=None): else: raise ValueError("Unknown key type in GetSynGroupStatus", type(var_key)) + def SetKernelStatus(params, val=None): "Set kernel parameters using dictionaries" - if ((type(params)==dict) & (val==None)): + if (type(params) == dict) & (val == None): for param_name in params: SetKernelStatus(param_name, params[param_name]) - elif (type(params)==str): + elif type(params) == str: if IsFloatParam(params): - return SetFloatParam(params, val) + return SetFloatParam(params, val) elif IsIntParam(params): return SetIntParam(params, val) elif IsBoolParam(params): @@ -2447,28 +2801,28 @@ def SetKernelStatus(params, val=None): else: raise ValueError("Unknown parameter in SetKernelStatus", params) else: - raise ValueError("Wrong argument in SetKernelStatus") + raise ValueError("Wrong argument in SetKernelStatus") if GetErrorCode() != 0: raise ValueError(GetErrorMessage()) NESTGPU_RemoteCreate = _nestgpu.NESTGPU_RemoteCreate -NESTGPU_RemoteCreate.argtypes = (ctypes.c_int, c_char_p, ctypes.c_int, - ctypes.c_int) +NESTGPU_RemoteCreate.argtypes = (ctypes.c_int, c_char_p, ctypes.c_int, ctypes.c_int) NESTGPU_Create.restype = ctypes.c_int + + def RemoteCreate(i_host, model_name, n_node=1, n_ports=1, status_dict=None): "Create a remote neuron group" - if (type(status_dict)==dict): + if type(status_dict) == dict: remote_node_group = RemoteCreate(i_host, model_name, n_node, n_ports) SetStatus(remote_node_group, status_dict) return remote_node_group - - elif status_dict!=None: + + elif status_dict != None: raise ValueError("Wrong argument in RemoteCreate") - - c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name)+1) - i_node = NESTGPU_RemoteCreate(ctypes.c_int(i_host), c_model_name, ctypes.c_int(n_node), - ctypes.c_int(n_ports)) + + c_model_name = ctypes.create_string_buffer(to_byte_str(model_name), len(model_name) + 1) + i_node = NESTGPU_RemoteCreate(ctypes.c_int(i_host), c_model_name, ctypes.c_int(n_node), ctypes.c_int(n_ports)) node_seq = NodeSeq(i_node, n_node) ret = RemoteNodeSeq(i_host, node_seq) if GetErrorCode() != 0: diff --git a/src/aeif_cond_alpha.cu b/src/aeif_cond_alpha.cu index 65cccda6f..3c9cc58ad 100644 --- a/src/aeif_cond_alpha.cu +++ b/src/aeif_cond_alpha.cu @@ -80,7 +80,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, aeif_cond_alpha_rk5 data_struct) @@ -109,7 +109,7 @@ int aeif_cond_alpha::Init(int i_node_0, int n_node, int n_port, n_group_param_ = N_GROUP_PARAM; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = aeif_cond_alpha_scal_var_name; scal_param_name_ = aeif_cond_alpha_scal_param_name; group_param_name_ = aeif_cond_alpha_group_param_name; @@ -119,7 +119,7 @@ int aeif_cond_alpha::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -141,7 +141,7 @@ int aeif_cond_alpha::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_cond_alpha.h b/src/aeif_cond_alpha.h index 226241fe9..267420bdf 
100644 --- a/src/aeif_cond_alpha.h +++ b/src/aeif_cond_alpha.h @@ -45,7 +45,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model Description +++++++++++ -``aeif_cond_alpha`` is a conductance-based adaptive exponential +``aeif_cond_alpha`` is a conductance-based adaptive exponential integrate-and-fire neuron model according to [1]_ with synaptic conductance modeled by an alpha function, as described in [2]_ @@ -123,9 +123,9 @@ tau_syn_in ms Time constant of inhibitory synaptic conductance ============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -162,22 +162,22 @@ class aeif_cond_alpha : public BaseNeuron float h_min_; float h_; aeif_cond_alpha_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_cond_alpha_kernel.h b/src/aeif_cond_alpha_kernel.h index 52a31ec17..d837f165e 100644 --- a/src/aeif_cond_alpha_kernel.h +++ b/src/aeif_cond_alpha_kernel.h @@ -159,7 +159,7 @@ const std::string aeif_cond_alpha_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/aeif_cond_alpha_multisynapse.cu b/src/aeif_cond_alpha_multisynapse.cu index 3a03604a5..e5b25791d 100644 --- a/src/aeif_cond_alpha_multisynapse.cu +++ b/src/aeif_cond_alpha_multisynapse.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -81,7 +81,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, aeif_cond_alpha_multisynapse_rk5 data_struct) @@ -113,7 +113,7 @@ int aeif_cond_alpha_multisynapse::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = aeif_cond_alpha_multisynapse_scal_var_name; port_var_name_= aeif_cond_alpha_multisynapse_port_var_name; scal_param_name_ = aeif_cond_alpha_multisynapse_scal_param_name; @@ -125,7 +125,7 @@ int aeif_cond_alpha_multisynapse::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -149,7 +149,7 @@ int aeif_cond_alpha_multisynapse::Calibrate(double time_min, float time_resoluti h_min_ = h_min_rel_* 
time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_cond_alpha_multisynapse.h b/src/aeif_cond_alpha_multisynapse.h index 4a72b895d..cd357c113 100644 --- a/src/aeif_cond_alpha_multisynapse.h +++ b/src/aeif_cond_alpha_multisynapse.h @@ -46,7 +46,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model Description +++++++++++ -``aeif_cond_alpha_multisynapse`` is a conductance-based adaptive exponential +``aeif_cond_alpha_multisynapse`` is a conductance-based adaptive exponential integrate-and-fire neuron model according to [1]_ with multiple synaptic time constants, and synaptic conductance modeled by an alpha function. @@ -134,9 +134,9 @@ tau_syn list of ms Time constant of synaptic conductance ============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -174,22 +174,22 @@ class aeif_cond_alpha_multisynapse : public BaseNeuron float h_min_; float h_; aeif_cond_alpha_multisynapse_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_cond_alpha_multisynapse_kernel.h b/src/aeif_cond_alpha_multisynapse_kernel.h index b4d206f85..f83aca212 100644 --- a/src/aeif_cond_alpha_multisynapse_kernel.h +++ b/src/aeif_cond_alpha_multisynapse_kernel.h @@ -112,7 +112,7 @@ const std::string aeif_cond_alpha_multisynapse_scal_param_name[N_SCAL_PARAM] = { const std::string aeif_cond_alpha_multisynapse_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_syn", - "g0" + "g0" }; const std::string aeif_cond_alpha_multisynapse_group_param_name[N_GROUP_PARAM] = { @@ -157,7 +157,7 @@ const std::string aeif_cond_alpha_multisynapse_group_param_name[N_GROUP_PARAM] = #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/aeif_cond_beta.cu b/src/aeif_cond_beta.cu index 7d9b196d9..e50c65ad8 100644 --- a/src/aeif_cond_beta.cu +++ b/src/aeif_cond_beta.cu @@ -39,7 +39,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, aeif_cond_beta_rk5 data_struct) { //int array_idx = threadIdx.x + blockIdx.x * blockDim.x; - + V_th = -50.4; Delta_T = 2.0; g_L = 30.0; @@ -59,7 +59,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, tau_decay_in = 20.0; tau_rise_ex = 2.0; tau_rise_in = 2.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -74,9 +74,9 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, float *param, aeif_cond_beta_rk5 data_struct) { //int array_idx = threadIdx.x + blockIdx.x * 
blockDim.x; - + refractory_step = 0; - + // denominator is computed here to check that it is != 0 float denom1 = tau_decay_ex - tau_rise_ex; float denom2 = 0; @@ -113,7 +113,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, aeif_cond_beta_rk5 data_struct) @@ -153,7 +153,7 @@ int aeif_cond_beta::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -175,7 +175,7 @@ int aeif_cond_beta::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_cond_beta.h b/src/aeif_cond_beta.h index b76f9cdf9..35ca610c3 100644 --- a/src/aeif_cond_beta.h +++ b/src/aeif_cond_beta.h @@ -46,7 +46,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model Description +++++++++++ -``aeif_cond_beta`` is a conductance-based adaptive exponential +``aeif_cond_beta`` is a conductance-based adaptive exponential integrate-and-fire neuron model according to [1]_ with synaptic conductance modeled by a beta function, as described in [2]_. @@ -126,9 +126,9 @@ tau_decay_in ms Decay time constant of inhibitory synaptic conductanc ========= ======= ========================================================= **Integration parameters** --------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ========= ======= ========================================================= @@ -166,22 +166,22 @@ class aeif_cond_beta : public BaseNeuron float h_min_; float h_; aeif_cond_beta_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_cond_beta_kernel.h b/src/aeif_cond_beta_kernel.h index f324342c5..676284705 100644 --- a/src/aeif_cond_beta_kernel.h +++ b/src/aeif_cond_beta_kernel.h @@ -166,7 +166,7 @@ const std::string aeif_cond_beta_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/aeif_cond_beta_multisynapse.cu b/src/aeif_cond_beta_multisynapse.cu index f184dab7e..2b75c4c1d 100644 --- a/src/aeif_cond_beta_multisynapse.cu +++ b/src/aeif_cond_beta_multisynapse.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -99,7 +99,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + 
__device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, aeif_cond_beta_multisynapse_rk5 data_struct) @@ -143,7 +143,7 @@ int aeif_cond_beta_multisynapse::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -167,7 +167,7 @@ int aeif_cond_beta_multisynapse::Calibrate(double time_min, float time_resolutio h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_cond_beta_multisynapse.h b/src/aeif_cond_beta_multisynapse.h index e207a8326..d77e219c9 100644 --- a/src/aeif_cond_beta_multisynapse.h +++ b/src/aeif_cond_beta_multisynapse.h @@ -46,7 +46,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model Description +++++++++++ -``aeif_cond_beta_multisynapse`` is a conductance-based adaptive exponential +``aeif_cond_beta_multisynapse`` is a conductance-based adaptive exponential integrate-and-fire neuron model according to [1]_ with multiple synaptic rise time and decay time constants, and synaptic conductance modeled by a beta function. @@ -136,9 +136,9 @@ tau_decay list of ms Decay time constant of synaptic conductance ========= ======= ========================================================= **Integration parameters** --------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ========= ======= ========================================================= @@ -176,22 +176,22 @@ class aeif_cond_beta_multisynapse : public BaseNeuron float h_min_; float h_; aeif_cond_beta_multisynapse_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_cond_beta_multisynapse_kernel.h b/src/aeif_cond_beta_multisynapse_kernel.h index 798cfa871..643a9c6d6 100644 --- a/src/aeif_cond_beta_multisynapse_kernel.h +++ b/src/aeif_cond_beta_multisynapse_kernel.h @@ -115,7 +115,7 @@ const std::string aeif_cond_beta_multisynapse_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; const std::string aeif_cond_beta_multisynapse_group_param_name[N_GROUP_PARAM] = { @@ -161,7 +161,7 @@ const std::string aeif_cond_beta_multisynapse_group_param_name[N_GROUP_PARAM] = #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/aeif_psc_alpha.cu b/src/aeif_psc_alpha.cu index 401c59dc8..868a2683a 100644 --- a/src/aeif_psc_alpha.cu +++ b/src/aeif_psc_alpha.cu @@ -53,7 +53,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; 
t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0.0; refractory_step = 0; @@ -104,7 +104,7 @@ int aeif_psc_alpha::Init(int i_node_0, int n_node, int n_port, node_type_ = i_aeif_psc_alpha_model; n_scal_var_ = N_SCAL_VAR; n_scal_param_ = N_SCAL_PARAM; - n_group_param_ = N_GROUP_PARAM; + n_group_param_ = N_GROUP_PARAM; n_var_ = n_scal_var_; n_param_ = n_scal_param_; @@ -128,7 +128,7 @@ int aeif_psc_alpha::Init(int i_node_0, int n_node, int n_port, port_weight_arr_ = GetParamArr() + GetScalParamIdx("I0_ex"); port_weight_arr_step_ = n_param_; port_weight_port_step_ = 1; - + port_input_arr_ = GetVarArr() + GetScalVarIdx("I1_syn_ex"); port_input_arr_step_ = n_var_; port_input_port_step_ = 1; @@ -142,7 +142,7 @@ int aeif_psc_alpha::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_psc_alpha.h b/src/aeif_psc_alpha.h index be7e7e6a0..4f27a63ad 100644 --- a/src/aeif_psc_alpha.h +++ b/src/aeif_psc_alpha.h @@ -121,9 +121,9 @@ The following parameters can be set in the status dictionary. ============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -156,22 +156,22 @@ class aeif_psc_alpha : public BaseNeuron float h_min_; float h_; aeif_psc_alpha_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_psc_alpha_kernel.h b/src/aeif_psc_alpha_kernel.h index c396c02b1..a7582893f 100644 --- a/src/aeif_psc_alpha_kernel.h +++ b/src/aeif_psc_alpha_kernel.h @@ -161,7 +161,7 @@ __device__ aeif_psc_alpha_rk5 data_struct) { float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); I_syn_tot += I_syn_ex - I_syn_in; diff --git a/src/aeif_psc_alpha_multisynapse.cu b/src/aeif_psc_alpha_multisynapse.cu index db8d2db14..dda6a1047 100644 --- a/src/aeif_psc_alpha_multisynapse.cu +++ b/src/aeif_psc_alpha_multisynapse.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0.0; refractory_step = 0; @@ -136,7 +136,7 @@ int aeif_psc_alpha_multisynapse::Init(int i_node_0, int n_node, int n_port, + GetPortParamIdx("I0"); port_weight_arr_step_ = n_param_; port_weight_port_step_ = n_port_param_; - + port_input_arr_ = GetVarArr() + n_scal_var_ + GetPortVarIdx("I1_syn"); port_input_arr_step_ = n_var_; @@ -151,7 +151,7 @@ int aeif_psc_alpha_multisynapse::Calibrate(double time_min, float time_resolutio h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_psc_alpha_multisynapse.h b/src/aeif_psc_alpha_multisynapse.h index d01e8f223..705d8837a 100644 --- a/src/aeif_psc_alpha_multisynapse.h +++ b/src/aeif_psc_alpha_multisynapse.h @@ -120,9 +120,9 @@ The following parameters can be set in the status dictionary. ============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -155,22 +155,22 @@ class aeif_psc_alpha_multisynapse : public BaseNeuron float h_min_; float h_; aeif_psc_alpha_multisynapse_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_psc_alpha_multisynapse_kernel.h b/src/aeif_psc_alpha_multisynapse_kernel.h index 61a34e895..520c8028f 100644 --- a/src/aeif_psc_alpha_multisynapse_kernel.h +++ b/src/aeif_psc_alpha_multisynapse_kernel.h @@ -163,7 +163,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/aeif_psc_delta.cu b/src/aeif_psc_delta.cu index 2b4d6adb1..ed6405f91 100644 --- a/src/aeif_psc_delta.cu +++ b/src/aeif_psc_delta.cu @@ -53,7 +53,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -104,7 +104,7 @@ int aeif_psc_delta::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = aeif_psc_delta_scal_var_name; scal_param_name_ = aeif_psc_delta_scal_param_name; group_param_name_ = aeif_psc_delta_group_param_name; @@ -114,7 +114,7 @@ int aeif_psc_delta::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -140,13 +140,13 @@ int aeif_psc_delta::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } int aeif_psc_delta::Update(long long it, double t1) { rk5_.Update<N_SCAL_VAR, N_SCAL_PARAM>(t1, h_min_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_psc_delta.h b/src/aeif_psc_delta.h index 3824991b9..43690defb 100644 --- a/src/aeif_psc_delta.h +++ b/src/aeif_psc_delta.h @@ -119,9 +119,9 @@ The following parameters can be set in the status dictionary. ============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -155,22 +155,22 @@ class aeif_psc_delta : public BaseNeuron float h_min_; float h_; aeif_psc_delta_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + }; #endif diff --git a/src/aeif_psc_delta_kernel.h b/src/aeif_psc_delta_kernel.h index c193fd76a..7813add27 100644 --- a/src/aeif_psc_delta_kernel.h +++ b/src/aeif_psc_delta_kernel.h @@ -131,13 +131,13 @@ const std::string aeif_psc_delta_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, aeif_psc_delta_rk5 data_struct) { - + float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak); float V_spike = Delta_T == 0. ? 0. 
: Delta_T*exp((V - V_th)/Delta_T); diff --git a/src/aeif_psc_exp.cu b/src/aeif_psc_exp.cu index 07431485d..4b89ba69f 100644 --- a/src/aeif_psc_exp.cu +++ b/src/aeif_psc_exp.cu @@ -53,7 +53,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + I_syn_ex = 0; I_syn_in = 0; V_m = E_L; @@ -102,13 +102,13 @@ int aeif_psc_exp::Init(int i_node_0, int n_node, int n_port, node_type_ = i_aeif_psc_exp_model; n_scal_var_ = N_SCAL_VAR; n_scal_param_ = N_SCAL_PARAM; - n_group_param_ = N_GROUP_PARAM; + n_group_param_ = N_GROUP_PARAM; n_var_ = n_scal_var_; n_param_ = n_scal_param_; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = aeif_psc_exp_scal_var_name; scal_param_name_ = aeif_psc_exp_scal_param_name; group_param_name_ = aeif_psc_exp_group_param_name; @@ -118,7 +118,7 @@ int aeif_psc_exp::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -144,7 +144,7 @@ int aeif_psc_exp::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_psc_exp.h b/src/aeif_psc_exp.h index 4f7f4a37a..4638b606b 100644 --- a/src/aeif_psc_exp.h +++ b/src/aeif_psc_exp.h @@ -47,7 +47,7 @@ Description +++++++++++ aeif_psc_exp is the adaptive exponential integrate and fire neuron -according to [1]_, with postsynaptic currents in the form of +according to [1]_, with postsynaptic currents in the form of truncated exponentials. This implementation uses the embedded 5th order Runge-Kutta @@ -125,9 +125,9 @@ The following parameters can be set in the status dictionary. 
============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -161,22 +161,22 @@ class aeif_psc_exp : public BaseNeuron float h_min_; float h_; aeif_psc_exp_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_psc_exp_kernel.h b/src/aeif_psc_exp_kernel.h index 4314266ed..63e045d56 100644 --- a/src/aeif_psc_exp_kernel.h +++ b/src/aeif_psc_exp_kernel.h @@ -139,7 +139,7 @@ const std::string aeif_psc_exp_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/aeif_psc_exp_multisynapse.cu b/src/aeif_psc_exp_multisynapse.cu index 9262f0af5..9b362c771 100644 --- a/src/aeif_psc_exp_multisynapse.cu +++ b/src/aeif_psc_exp_multisynapse.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -111,7 +111,7 @@ int aeif_psc_exp_multisynapse::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = aeif_psc_exp_multisynapse_scal_var_name; port_var_name_= aeif_psc_exp_multisynapse_port_var_name; scal_param_name_ = aeif_psc_exp_multisynapse_scal_param_name; @@ -123,7 +123,7 @@ int aeif_psc_exp_multisynapse::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -150,7 +150,7 @@ int aeif_psc_exp_multisynapse::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/aeif_psc_exp_multisynapse.h b/src/aeif_psc_exp_multisynapse.h index 4ba576183..2591c07c5 100644 --- a/src/aeif_psc_exp_multisynapse.h +++ b/src/aeif_psc_exp_multisynapse.h @@ -47,7 +47,7 @@ Description +++++++++++ aeif_psc_exp_multisynapse is the adaptive exponential integrate and fire neuron -according to [1]_, with postsynaptic currents in the form of +according to [1]_, with postsynaptic currents in the form of truncated exponentials. This implementation uses the embedded 5th order Runge-Kutta @@ -123,9 +123,9 @@ The following parameters can be set in the status dictionary. 
============= ======= ========================================================= **Integration parameters** ------------------------------------------------------------------------------- -h0_rel real Starting step in ODE integration relative to time +h0_rel real Starting step in ODE integration relative to time resolution -h_min_rel real Minimum step in ODE integration relative to time +h_min_rel real Minimum step in ODE integration relative to time resolution ============= ======= ========================================================= @@ -159,22 +159,22 @@ class aeif_psc_exp_multisynapse : public BaseNeuron float h_min_; float h_; aeif_psc_exp_multisynapse_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/aeif_psc_exp_multisynapse_kernel.h b/src/aeif_psc_exp_multisynapse_kernel.h index 7b735dc63..a0b05182a 100644 --- a/src/aeif_psc_exp_multisynapse_kernel.h +++ b/src/aeif_psc_exp_multisynapse_kernel.h @@ -147,7 +147,7 @@ const std::string aeif_psc_exp_multisynapse_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, @@ -155,7 +155,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/base_neuron.cu b/src/base_neuron.cu index 3439b1da0..181166c96 100644 --- a/src/base_neuron.cu +++ b/src/base_neuron.cu @@ -177,7 +177,7 @@ int BaseNeuron::Init(int i_node_0, int n_node, int n_port, den_delay_arr_ = NULL; // array of dendritic backward delays return 0; -} +} // allocate state-variable array int BaseNeuron::AllocVarArr() @@ -230,7 +230,7 @@ int BaseNeuron::SetScalParam(int i_neuron, int n_neuron, (param_pt, n_neuron, n_param_, val); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -255,7 +255,7 @@ int BaseNeuron::SetScalParam(int *i_neuron, int n_neuron, gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk(cudaFree(d_i_neuron)); - + return 0; } @@ -277,7 +277,7 @@ int BaseNeuron::SetPortParam(int i_neuron, int n_neuron, "to the number of ports."); } float *param_pt; - + for (int i_vect=0; i_vect<vect_size; i_vect++) { param_pt = GetParamPt(i_neuron, param_name, i_vect); BaseNeuronSetFloatArray<<<(n_neuron+1023)/1024, 1024>>> @@ -375,7 +375,7 @@ int BaseNeuron::SetIntVar(int i_neuron, int n_neuron, (var_pt, n_neuron, 1, val); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -400,7 +400,7 @@ int BaseNeuron::SetIntVar(int *i_neuron, int n_neuron, gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk(cudaFree(d_i_neuron)); - + return 0; } @@ -421,7 +421,7 @@ int BaseNeuron::SetScalVar(int i_neuron, int n_neuron, (var_pt, n_neuron, n_var_, val); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -446,7 +446,7 @@ int BaseNeuron::SetScalVar(int *i_neuron, int n_neuron, gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk(cudaFree(d_i_neuron)); - + return 0; } @@ -468,7 +468,7 @@ int BaseNeuron::SetPortVar(int i_neuron, int n_neuron, "to the number of ports."); } float *var_pt; - + for (int i_vect=0; i_vect<vect_size; i_vect++) { var_pt = GetVarPt(i_neuron, var_name, i_vect); BaseNeuronSetFloatArray<<<(n_neuron+1023)/1024, 1024>>> @@ -556,11 +556,11 @@ float *BaseNeuron::GetScalParam(int i_neuron, int n_neuron, (param_pt, d_param_arr, n_neuron, n_param_, 1); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + gpuErrchk(cudaMemcpy(h_param_arr, d_param_arr, n_neuron*sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_param_arr)); - + return h_param_arr; } @@ -583,7 +583,7 @@ float *BaseNeuron::GetScalParam(int *i_neuron, int n_neuron, float *d_param_arr; gpuErrchk(cudaMalloc(&d_param_arr, n_neuron*sizeof(float))); float *h_param_arr = (float*)malloc(n_neuron*sizeof(float)); - + BaseNeuronGetFloatPtArray<<<(n_neuron+1023)/1024, 1024>>> (param_pt, d_param_arr, d_i_neuron, n_neuron, n_param_, 1); gpuErrchk( cudaPeekAtLastError() ); @@ -613,7 +613,7 @@ float *BaseNeuron::GetPortParam(int i_neuron, int n_neuron, float *d_param_arr; gpuErrchk(cudaMalloc(&d_param_arr, n_neuron*n_port_*sizeof(float))); float *h_param_arr = (float*)malloc(n_neuron*n_port_*sizeof(float)); - + for (int port=0; port<n_port_; port++) { param_pt = GetParamPt(i_neuron, param_name, port); BaseNeuronGetFloatArray<<<(n_neuron+1023)/1024, 1024>>> @@ -625,7 +625,7 @@ float *BaseNeuron::GetPortParam(int i_neuron, int n_neuron, gpuErrchk(cudaMemcpy(h_param_arr, d_param_arr, n_neuron*n_port_ *sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_param_arr)); - + return h_param_arr; } 
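The GetScalParam/GetPortParam hunks above all follow one pattern: allocate a device scratch buffer, launch a gather kernel over (n_neuron+1023)/1024 blocks of 1024 threads (ceiling division, so every neuron gets a thread), copy the result back with cudaMemcpy, and free the buffer. The gather itself is plain strided indexing into the per-neuron parameter block, shown here as a host-side sketch; the helper name gather_scal_param is made up for illustration.

```cpp
#include <cstdio>
#include <vector>

// param_arr holds n_param consecutive floats per neuron, so scalar parameter
// i_param of neuron i sits at param_arr[i*n_param + i_param] (the layout that
// GetParamPt/GetVarPt expose in the diff). This mirrors on the host what the
// device gather kernel does before the copy back to host memory.
std::vector<float> gather_scal_param(const std::vector<float>& param_arr,
                                     int i_neuron, int n_neuron,
                                     int n_param, int i_param)
{
    std::vector<float> out(n_neuron);
    for (int i = 0; i < n_neuron; ++i) {
        out[i] = param_arr[(i_neuron + i) * n_param + i_param];
    }
    return out;
}

int main()
{
    // 3 neurons with 2 scalar parameters each: {p0, p1} per neuron.
    std::vector<float> param_arr = {1.f, 10.f, 2.f, 20.f, 3.f, 30.f};
    for (float v : gather_scal_param(param_arr, 0, 3, 2, 1)) {
        std::printf("%g ", v);  // prints 10 20 30
    }
    std::printf("\n");
    return 0;
}
```

The same layout explains why the setter kernels in the diff advance the parameter pointer by n_param_ between neurons.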
@@ -647,7 +647,7 @@ float *BaseNeuron::GetPortParam(int *i_neuron, int n_neuron, float *d_param_arr; gpuErrchk(cudaMalloc(&d_param_arr, n_neuron*n_port_*sizeof(float))); float *h_param_arr = (float*)malloc(n_neuron*n_port_*sizeof(float)); - + for (int port=0; port<n_port_; port++) { float *param_pt = GetParamPt(0, param_name, port); BaseNeuronGetFloatPtArray<<<(n_neuron+1023)/1024, 1024>>> @@ -657,11 +657,11 @@ float *BaseNeuron::GetPortParam(int *i_neuron, int n_neuron, gpuErrchk( cudaDeviceSynchronize() ); } gpuErrchk(cudaFree(d_i_neuron)); - + gpuErrchk(cudaMemcpy(h_param_arr, d_param_arr, n_neuron*n_port_ *sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_param_arr)); - + return h_param_arr; } @@ -682,12 +682,12 @@ float BaseNeuron::GetGroupParam(std::string param_name) return group_param_[i_param]; } } - + throw ngpu_exception(std::string("Unrecognized group parameter ") + param_name); } - + // get integer variable var_name of neurons // i_neuron, ..., i_neuron + n_neuron -1 int *BaseNeuron::GetIntVar(int i_neuron, int n_neuron, @@ -709,11 +709,11 @@ int *BaseNeuron::GetIntVar(int i_neuron, int n_neuron, (var_pt, d_var_arr, n_neuron, 1, 1); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + gpuErrchk(cudaMemcpy(h_var_arr, d_var_arr, n_neuron*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_var_arr)); - + return h_var_arr; } @@ -736,7 +736,7 @@ int *BaseNeuron::GetIntVar(int *i_neuron, int n_neuron, int *d_var_arr; gpuErrchk(cudaMalloc(&d_var_arr, n_neuron*sizeof(int))); int *h_var_arr = (int*)malloc(n_neuron*sizeof(int)); - + BaseNeuronGetIntPtArray<<<(n_neuron+1023)/1024, 1024>>> (var_pt, d_var_arr, d_i_neuron, n_neuron, 1, 1); gpuErrchk( cudaPeekAtLastError() ); @@ -746,7 +746,7 @@ int *BaseNeuron::GetIntVar(int *i_neuron, int n_neuron, gpuErrchk(cudaMemcpy(h_var_arr, d_var_arr, n_neuron*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_var_arr)); - + return h_var_arr; } @@ -771,11 +771,11 @@ float *BaseNeuron::GetScalVar(int i_neuron, int n_neuron, (var_pt, d_var_arr, n_neuron, n_var_, 1); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + gpuErrchk(cudaMemcpy(h_var_arr, d_var_arr, n_neuron*sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_var_arr)); - + return h_var_arr; } @@ -798,7 +798,7 @@ float *BaseNeuron::GetScalVar(int *i_neuron, int n_neuron, float *d_var_arr; gpuErrchk(cudaMalloc(&d_var_arr, n_neuron*sizeof(float))); float *h_var_arr = (float*)malloc(n_neuron*sizeof(float)); - + BaseNeuronGetFloatPtArray<<<(n_neuron+1023)/1024, 1024>>> (var_pt, d_var_arr, d_i_neuron, n_neuron, n_var_, 1); gpuErrchk( cudaPeekAtLastError() ); @@ -828,7 +828,7 @@ float *BaseNeuron::GetPortVar(int i_neuron, int n_neuron, float *d_var_arr; gpuErrchk(cudaMalloc(&d_var_arr, n_neuron*n_port_*sizeof(float))); float *h_var_arr = (float*)malloc(n_neuron*n_port_*sizeof(float)); - + for (int port=0; port<n_port_; port++) { var_pt = GetVarPt(i_neuron, var_name, port); BaseNeuronGetFloatArray<<<(n_neuron+1023)/1024, 1024>>> @@ -840,7 +840,7 @@ float *BaseNeuron::GetPortVar(int i_neuron, int n_neuron, gpuErrchk(cudaMemcpy(h_var_arr, d_var_arr, n_neuron*n_port_ *sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_var_arr)); - + return h_var_arr; } @@ -862,7 +862,7 @@ float *BaseNeuron::GetPortVar(int *i_neuron, int n_neuron, float *d_var_arr; gpuErrchk(cudaMalloc(&d_var_arr, n_neuron*n_port_*sizeof(float))); float *h_var_arr = (float*)malloc(n_neuron*n_port_*sizeof(float)); - + for (int port=0; 
port<n_port_; port++) { float *var_pt = GetVarPt(0, var_name, port); BaseNeuronGetFloatPtArray<<<(n_neuron+1023)/1024, 1024>>> @@ -871,11 +871,11 @@ float *BaseNeuron::GetPortVar(int *i_neuron, int n_neuron, gpuErrchk( cudaDeviceSynchronize() ); } gpuErrchk(cudaFree(d_i_neuron)); - + gpuErrchk(cudaMemcpy(h_var_arr, d_var_arr, n_neuron*n_port_ *sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_var_arr)); - + return h_var_arr; } @@ -897,7 +897,7 @@ int BaseNeuron::GetIntVarIdx(std::string var_name) throw ngpu_exception(std::string("Unrecognized integer variable ") + var_name); } - + return i_var; } @@ -912,7 +912,7 @@ int BaseNeuron::GetScalVarIdx(std::string var_name) throw ngpu_exception(std::string("Unrecognized scalar variable ") + var_name); } - + return i_var; } @@ -927,7 +927,7 @@ int BaseNeuron::GetPortVarIdx(std::string var_name) throw ngpu_exception(std::string("Unrecognized port variable ") + var_name); } - + return i_var; } @@ -942,13 +942,13 @@ int BaseNeuron::GetScalParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized parameter ") + param_name); } - + return i_param; } // get index of receptor-port parameter param_name int BaseNeuron::GetPortParamIdx(std::string param_name) -{ +{ int i_param; for (i_param=0; i_param<n_port_param_; i_param++) { if (param_name == port_param_name_[i_param]) break; @@ -957,7 +957,7 @@ int BaseNeuron::GetPortParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized port parameter ") + param_name); } - + return i_param; } @@ -981,7 +981,7 @@ int BaseNeuron::GetArrayVarSize(int i_neuron, std::string var_name) + var_name); } - + // return array size for array parameter param_name // Must be defined in derived class int BaseNeuron::GetArrayParamSize(int i_neuron, std::string param_name) @@ -1083,7 +1083,7 @@ bool BaseNeuron::IsScalParam(std::string param_name) // check if param_name is a receptor-port parameter bool BaseNeuron::IsPortParam(std::string param_name) -{ +{ int i_param; for (i_param=0; i_param<n_port_param_; i_param++) { if (param_name == port_param_name_[i_param]) return true; @@ -1139,10 +1139,10 @@ int BaseNeuron::CheckPortIdx(int port) int *BaseNeuron::GetIntVarPt(int i_neuron, std::string var_name) { CheckNeuronIdx(i_neuron); - + if (IsIntVar(var_name)) { int i_var = GetIntVarIdx(var_name); - return int_var_pt_[i_var] + i_neuron; + return int_var_pt_[i_var] + i_neuron; } else { throw ngpu_exception(std::string("Unrecognized integer variable ") @@ -1159,7 +1159,7 @@ float *BaseNeuron::GetVarPt(int i_neuron, std::string var_name, if (port!=0) { CheckPortIdx(port); } - + if (IsScalVar(var_name)) { int i_var = GetScalVarIdx(var_name); return GetVarArr() + i_neuron*n_var_ + i_var; @@ -1199,7 +1199,7 @@ float *BaseNeuron::GetParamPt(int i_neuron, std::string param_name, } } -// return spike multiplicity (spike_height) of neuron i_neuron +// return spike multiplicity (spike_height) of neuron i_neuron // if neuron emitted a spike in the current time step // otherwise return 0 float BaseNeuron::GetSpikeActivity(int i_neuron) @@ -1212,7 +1212,7 @@ float BaseNeuron::GetSpikeActivity(int i_neuron) if (Ns==0) { return 0.0; } - + int is0; gpuErrchk(cudaMemcpy(&is0, d_SpikeBufferIdx0 + i_spike_buffer, sizeof(int), cudaMemcpyDeviceToHost)); @@ -1245,7 +1245,7 @@ std::vector<std::string> BaseNeuron::GetScalVarNames() for (int i=0; i<n_scal_var_; i++) { var_name_vect.push_back(scal_var_name_[i]); } - + return var_name_vect; } @@ -1268,11 +1268,11 @@ std::vector<std::string> 
BaseNeuron::GetPortVarNames() for (int i=0; i<n_port_var_; i++) { var_name_vect.push_back(port_var_name_[i]); } - + return var_name_vect; } -// get number of receptor-port variables +// get number of receptor-port variables int BaseNeuron::GetNPortVar() { return n_port_var_; @@ -1285,7 +1285,7 @@ std::vector<std::string> BaseNeuron::GetScalParamNames() for (int i=0; i<n_scal_param_; i++) { param_name_vect.push_back(scal_param_name_[i]); } - + return param_name_vect; } @@ -1302,7 +1302,7 @@ std::vector<std::string> BaseNeuron::GetPortParamNames() for (int i=0; i<n_port_param_; i++) { param_name_vect.push_back(port_param_name_[i]); } - + return param_name_vect; } @@ -1319,7 +1319,7 @@ std::vector<std::string> BaseNeuron::GetGroupParamNames() for (int i=0; i<n_group_param_; i++) { param_name_vect.push_back(group_param_name_[i]); } - + return param_name_vect; } @@ -1336,7 +1336,7 @@ std::vector<std::string> BaseNeuron::GetArrayVarNames() for (int i=0; i<GetNArrayVar(); i++) { var_name_vect.push_back(array_var_name_[i]); } - + return var_name_vect; } @@ -1353,7 +1353,7 @@ std::vector<std::string> BaseNeuron::GetArrayParamNames() for (int i=0; i<GetNArrayParam(); i++) { param_name_vect.push_back(array_param_name_[i]); } - + return param_name_vect; } @@ -1368,7 +1368,7 @@ int BaseNeuron::ActivateSpikeCount() { const std::string s = "spike_count"; if (std::find(int_var_name_.begin(), int_var_name_.end(), s) - == int_var_name_.end()) { // add it if not already present + == int_var_name_.end()) { // add it if not already present int_var_name_.push_back(s); gpuErrchk(cudaMalloc(&spike_count_, n_node_*sizeof(int))); @@ -1392,7 +1392,7 @@ int BaseNeuron::ActivateRecSpikeTimes(int max_n_rec_spike_times) } const std::string s = "n_rec_spike_times"; if (std::find(int_var_name_.begin(), int_var_name_.end(), s) - == int_var_name_.end()) { // add it if not already present + == int_var_name_.end()) { // add it if not already present int_var_name_.push_back(s); gpuErrchk(cudaMalloc(&n_rec_spike_times_, n_node_*sizeof(int))); @@ -1400,7 +1400,7 @@ int BaseNeuron::ActivateRecSpikeTimes(int max_n_rec_spike_times) (n_node_+1)*sizeof(int))); gpuErrchk(cudaMemset(n_rec_spike_times_, 0, n_node_*sizeof(int))); int_var_pt_.push_back(n_rec_spike_times_); - + max_n_rec_spike_times_ = max_n_rec_spike_times; gpuErrchk(cudaMalloc(&rec_spike_times_, n_node_*max_n_rec_spike_times *sizeof(int))); @@ -1433,7 +1433,7 @@ int BaseNeuron::GetNRecSpikeTimes(int i_neuron) throw ngpu_exception("Spike times recording was not activated"); } int n_spikes; - + gpuErrchk(cudaMemcpy(&n_spikes, &n_rec_spike_times_[i_neuron], sizeof(int), cudaMemcpyDeviceToHost)); return n_spikes; @@ -1458,12 +1458,12 @@ int BaseNeuron::SetNeuronGroupParam(std::string param_name, float val) // kernel for packing spike times of neurons // i_neuron, ..., i_neuron + n_neuron -1 -// in contiguous locations in GPU memory +// in contiguous locations in GPU memory __global__ void PackSpikeTimesKernel(int n_neuron, int *n_rec_spike_times_cumul, float *rec_spike_times, float *rec_spike_times_pack, int n_spike_tot, int max_n_rec_spike_times) { - // array_idx: index on one-dimensional packed spike array + // array_idx: index on one-dimensional packed spike array int array_idx = threadIdx.x + blockIdx.x * blockDim.x; if (array_idx<n_spike_tot) { // a locate of array_idx on the cumulative sum of the number of spikes @@ -1486,7 +1486,7 @@ __global__ void PackSpikeTimesKernel(int n_neuron, int *n_rec_spike_times_cumul, // extract recorded spike times // and put them in a 
buffer int BaseNeuron::BufferRecSpikeTimes() -{ +{ if(max_n_rec_spike_times_<=0) { throw ngpu_exception("Spike times recording was not activated"); } @@ -1520,7 +1520,7 @@ int BaseNeuron::BufferRecSpikeTimes() else { delete[] h_n_rec_spike_times_cumul; } - + return 0; } @@ -1554,12 +1554,12 @@ int BaseNeuron::GetRecSpikeTimes(int **n_spike_times_pt, float *spike_times_pack = spike_times_buffer_[i_buf]; int *n_spike_times_cumul = n_spike_times_cumul_buffer_[i_buf]; // array_idx: index of the first spike of node i_node - // on one-dimensional packed spike array + // on one-dimensional packed spike array int array_idx = n_spike_times_cumul[i_node]; int n_spike = n_spike_times_cumul[i_node+1] - array_idx; - + float *pt = spike_times_pack + array_idx; - // insert the spikes of node i_node in its spike vector + // insert the spikes of node i_node in its spike vector spike_times_vect_[i_node].insert(spike_times_vect_[i_node].begin()+k, pt, pt+n_spike); k += n_spike; @@ -1570,11 +1570,10 @@ int BaseNeuron::GetRecSpikeTimes(int **n_spike_times_pt, spike_times_pt_vect_[i_node] = spike_times_vect_[i_node].data(); } spike_times_buffer_.clear(); - n_spike_times_cumul_buffer_.clear(); - + n_spike_times_cumul_buffer_.clear(); + *n_spike_times_pt = n_spike_times_vect_.data(); *spike_times_pt = spike_times_pt_vect_.data(); - + return 0; } - diff --git a/src/base_neuron.h b/src/base_neuron.h index daa3c18fc..8553a4bfa 100644 --- a/src/base_neuron.h +++ b/src/base_neuron.h @@ -44,7 +44,7 @@ class BaseNeuron int n_node_; int n_port_; int i_group_; - unsigned long long *seed_; + unsigned long long *seed_; int n_int_var_; int n_scal_var_; @@ -54,7 +54,7 @@ class BaseNeuron int n_group_param_; int n_var_; int n_param_; - + double *get_spike_array_; float *port_weight_arr_; int port_weight_arr_step_; @@ -62,7 +62,7 @@ class BaseNeuron float *port_input_arr_; int port_input_arr_step_; int port_input_port_step_; - std::vector<int*> int_var_pt_; + std::vector<int*> int_var_pt_; float *var_arr_; float *param_arr_; float *group_param_; @@ -74,7 +74,7 @@ class BaseNeuron const std::string *group_param_name_; std::vector<std::string> array_var_name_; std::vector<std::string> array_param_name_; - + DirectConnection *d_dir_conn_array_; uint64_t n_dir_conn_; // = 0; bool has_dir_conn_; // = false; @@ -97,22 +97,22 @@ class BaseNeuron std::vector<float> port_input_vect_; std::vector<float> ext_neuron_input_spikes_; - + public: virtual ~BaseNeuron() {} - + virtual int Init(int i_node_0, int n_neuron, int n_port, int i_neuron_group, unsigned long long *seed); virtual int AllocVarArr(); - + virtual int AllocParamArr(); virtual int FreeVarArr(); - + virtual int FreeParamArr(); - + int GetNodeType() { return node_type_; } @@ -121,59 +121,59 @@ class BaseNeuron { return ext_neuron_flag_; } - + virtual int Calibrate(double time_min, float time_resolution) {return 0;} - + virtual int Update(long long it, double t1) {return 0;} - + virtual int GetX(int i_neuron, int n_neuron, double *x) {return 0;} - + virtual int GetY(int i_var, int i_neuron, int n_neuron, float *y) {return 0;} - - virtual int SetScalParam(int i_neuron, int n_neuron, std::string param_name, + + virtual int SetScalParam(int i_neuron, int n_neuron, std::string param_name, float val); virtual int SetScalParam(int *i_neuron, int n_neuron, std::string param_name, float val); - + virtual int SetPortParam(int i_neuron, int n_neuron, std::string param_name, float *param, int vect_size); - + virtual int SetPortParam(int *i_neuron, int n_neuron, std::string param_name, float 
*param, int vect_size); virtual int SetArrayParam(int i_neuron, int n_neuron, std::string param_name, float *array, int array_size); - + virtual int SetArrayParam(int *i_neuron, int n_neuron, std::string param_name, float *array, int array_size); virtual int SetGroupParam(std::string param_name, float val); - virtual int SetIntVar(int i_neuron, int n_neuron, std::string var_name, + virtual int SetIntVar(int i_neuron, int n_neuron, std::string var_name, int val); virtual int SetIntVar(int *i_neuron, int n_neuron, std::string var_name, int val); - virtual int SetScalVar(int i_neuron, int n_neuron, std::string var_name, + virtual int SetScalVar(int i_neuron, int n_neuron, std::string var_name, float val); virtual int SetScalVar(int *i_neuron, int n_neuron, std::string var_name, float val); - + virtual int SetPortVar(int i_neuron, int n_neuron, std::string var_name, float *var, int vect_size); - + virtual int SetPortVar(int *i_neuron, int n_neuron, std::string var_name, float *var, int vect_size); virtual int SetArrayVar(int i_neuron, int n_neuron, std::string var_name, float *array, int array_size); - + virtual int SetArrayVar(int *i_neuron, int n_neuron, std::string var_name, float *array, int array_size); @@ -215,7 +215,7 @@ class BaseNeuron virtual float *GetArrayVar(int i_neuron, std::string var_name); virtual int GetIntVarIdx(std::string var_name); - + virtual int GetScalVarIdx(std::string var_name); virtual int GetPortVarIdx(std::string var_name); @@ -229,7 +229,7 @@ class BaseNeuron virtual float *GetParamArr(); virtual int GetArrayVarSize(int i_neuron, std::string var_name); - + virtual int GetArrayParamSize(int i_neuron, std::string param_name); virtual int GetVarSize(std::string var_name); @@ -243,7 +243,7 @@ class BaseNeuron virtual bool IsPortVar(std::string var_name); virtual bool IsArrayVar(std::string var_name); - + virtual bool IsScalParam(std::string param_name); virtual bool IsPortParam(std::string param_name); @@ -257,10 +257,10 @@ class BaseNeuron int CheckPortIdx(int port); virtual int *GetIntVarPt(int i_neuron, std::string var_name); - + virtual float *GetVarPt(int i_neuron, std::string var_name, int port=0); - virtual float *GetParamPt(int i_neuron, std::string param_name, + virtual float *GetParamPt(int i_neuron, std::string param_name, int port=0); virtual float GetSpikeActivity(int i_neuron); @@ -269,33 +269,33 @@ class BaseNeuron virtual std::vector<std::string> GetIntVarNames(); virtual int GetNIntVar(); - + virtual std::vector<std::string> GetScalVarNames(); - + virtual int GetNScalVar(); virtual std::vector<std::string> GetPortVarNames(); - + virtual int GetNPortVar(); virtual std::vector<std::string> GetScalParamNames(); - + virtual int GetNScalParam(); virtual std::vector<std::string> GetPortParamNames(); - + virtual int GetNPortParam(); virtual std::vector<std::string> GetArrayVarNames(); - + virtual int GetNArrayVar(); virtual std::vector<std::string> GetArrayParamNames(); - + virtual int GetNArrayParam(); virtual std::vector<std::string> GetGroupParamNames(); - + virtual int GetNGroupParam(); virtual int ActivateSpikeCount(); @@ -305,7 +305,7 @@ class BaseNeuron virtual int GetNRecSpikeTimes(int i_neuron); virtual int BufferRecSpikeTimes(); - + virtual int GetRecSpikeTimes(int **n_spike_times_pt, float ***spike_times_pt); virtual int SetRecSpikeTimesStep(int rec_spike_times_step); diff --git a/src/connect.cu b/src/connect.cu index 8cbfda98c..9501e28e4 100644 --- a/src/connect.cu +++ b/src/connect.cu @@ -36,7 +36,7 @@ extern bool ConnectionSpikeTimeFlag; // 
provare a mettere nella classe? int NetConnection::Connect(int i_source, int i_target, unsigned char port, - unsigned char syn_group, float weight, float delay) + unsigned char syn_group, float weight, float delay) { if (delay<time_resolution_) { throw ngpu_exception("Delay must be >= time resolution"); @@ -45,11 +45,11 @@ int NetConnection::Connect(int i_source, int i_target, unsigned char port, if (syn_group>=1) { ConnectionSpikeTimeFlag=true; } - + int d_int = (int)round(delay/time_resolution_) - 1; TargetSyn tg = {i_target, port, syn_group, weight}; Insert(d_int, i_source, tg); - + return 0; } @@ -69,7 +69,7 @@ int NetConnection::Insert(int d_int, int i_source, TargetSyn tg) else { conn[id].target_vect.push_back(tg); } - + return 0; } @@ -97,7 +97,7 @@ int NetConnection::ConnGroupPrint(int i_source) } std::cout << std::endl; } - + return 0; } @@ -118,7 +118,7 @@ unsigned int NetConnection::StoredNConnections() if (n_conn_==0) { n_conn_ = NConnections(); } - + return n_conn_; } @@ -132,7 +132,7 @@ unsigned int NetConnection::NConnections() n_conn += n_target; } } - + return n_conn; } @@ -155,7 +155,7 @@ ConnectionStatus NetConnection::GetConnectionStatus(ConnectionId conn_id) int i_conn = conn_id.i_conn_; std::vector<ConnGroup> &conn = connection_[i_source]; std::vector<TargetSyn> tv = conn[i_group].target_vect; - + ConnectionStatus conn_stat; conn_stat.i_source = i_source; conn_stat.i_target = tv[i_conn].node; @@ -171,12 +171,12 @@ std::vector<ConnectionStatus> NetConnection::GetConnectionStatus (std::vector<ConnectionId> &conn_id_vect) { std::vector<ConnectionStatus> conn_stat_vect; - + for (unsigned int i=0; i<conn_id_vect.size(); i++) { ConnectionId conn_id = conn_id_vect[i]; ConnectionStatus conn_stat = GetConnectionStatus(conn_id); conn_stat_vect.push_back(conn_stat); } - + return conn_stat_vect; } diff --git a/src/connect.h b/src/connect.h index b57965b85..53234e2e7 100644 --- a/src/connect.h +++ b/src/connect.h @@ -62,7 +62,7 @@ struct TargetSyn unsigned char syn_group; float weight; }; - + struct ConnGroup // connections from the same source node with same delay { int delay; @@ -90,7 +90,7 @@ class NetConnection float time_resolution_; NetConnection() {n_conn_ = 0;} - + std::vector<std::vector<ConnGroup> > connection_; int Insert(int d_int, int i_source, TargetSyn tg); @@ -99,13 +99,13 @@ class NetConnection unsigned char syn_group, float weight, float delay); int Print(); - + int ConnGroupPrint(int i_source); int MaxDelayNum(); - + unsigned int StoredNConnections(); - + unsigned int NConnections(); unsigned int NRevConnections() {return n_rev_conn_;} @@ -119,7 +119,7 @@ class NetConnection std::vector<ConnectionStatus> GetConnectionStatus(std::vector<ConnectionId> &conn_id_vect); - + template<class T> std::vector<ConnectionId> GetConnections(T source, int n_source, @@ -158,7 +158,7 @@ std::vector<ConnectionId> NetConnection::GetConnections(T source, } } } - + return conn_id_vect; } @@ -171,7 +171,7 @@ std::vector<ConnectionId> NetConnection::GetConnections(T source, { std::vector<int> target_vect(i_target, i_target+n_target); std::sort(target_vect.begin(), target_vect.end()); - + std::vector<ConnectionId> conn_id_vect; for (int is=0; is<n_source; is++) { int i_source = GetINode<T>(source, is); @@ -195,7 +195,7 @@ std::vector<ConnectionId> NetConnection::GetConnections(T source, } } } - + return conn_id_vect; } diff --git a/src/connect_mpi.cu b/src/connect_mpi.cu index 1bb962dac..5e3f2629f 100644 --- a/src/connect_mpi.cu +++ b/src/connect_mpi.cu @@ -103,7 +103,7 @@ int 
ConnectMpi::MpiInit(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &mpi_np_); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_id_); mpi_master_ = 0; - + return 0; } @@ -120,7 +120,7 @@ int ConnectMpi::RemoteConnect(int i_source_host, int i_source_node, float weight, float delay) { int i_remote_node; - + if (mpi_id_==i_source_host && i_source_host==i_target_host) { return net_connection_->Connect(i_source_node, i_target_node, port, syn_group, weight, delay); diff --git a/src/connect_mpi.h b/src/connect_mpi.h index ea6eef2c7..e5f02b3ca 100644 --- a/src/connect_mpi.h +++ b/src/connect_mpi.h @@ -45,24 +45,24 @@ class ConnectMpi int mpi_np_; int mpi_master_; bool remote_spike_height_; - + double SendSpikeToRemote_MPI_time_; double RecvSpikeFromRemote_MPI_time_; double SendSpikeToRemote_CUDAcp_time_; double RecvSpikeFromRemote_CUDAcp_time_; double JoinSpike_time_; - - + + std::vector<std::vector<ExternalConnectionNode > > extern_connection_; int MPI_Recv_int(int *int_val, int n, int sender_id); - + int MPI_Recv_float(float *float_val, int n, int sender_id); int MPI_Recv_uchar(unsigned char *uchar_val, int n, int sender_id); - + int MPI_Send_int(int *int_val, int n, int target_id); - + int MPI_Send_float(float *float_val, int n, int target_id); int MPI_Send_uchar(unsigned char *uchar_val, int n, int target_id); @@ -74,9 +74,9 @@ class ConnectMpi float weight, float delay); */ int MpiInit(int argc, char *argv[]); - + bool ProcMaster(); - + int ExternalSpikeInit(int n_node, int n_hosts, int max_spike_per_host); int SendSpikeToRemote(int n_hosts, int max_spike_per_host); diff --git a/src/connect_rules.cpp b/src/connect_rules.cpp index 84b1dde39..57b243909 100644 --- a/src/connect_rules.cpp +++ b/src/connect_rules.cpp @@ -40,7 +40,7 @@ int ConnSpec::Init() outdegree_ = 0; return 0; } - + ConnSpec::ConnSpec() { Init(); @@ -66,7 +66,7 @@ int ConnSpec::Init(int rule, int degree /*=0*/) else if (rule==FIXED_OUTDEGREE) { outdegree_ = degree; } - + return 0; } @@ -194,7 +194,7 @@ int SynSpec::SetParam(std::string param_name, int value) port_ = value; return 0; } - + throw ngpu_exception("Unknown synapse int parameter"); } @@ -234,7 +234,7 @@ bool SynSpec::IsFloatParam(std::string param_name) return false; } } - + int SynSpec::SetParam(std::string param_name, float *array_pt) { if (param_name=="weight_array") { @@ -246,7 +246,7 @@ int SynSpec::SetParam(std::string param_name, float *array_pt) else { throw ngpu_exception("Unknown synapse array parameter"); } - + return 0; } @@ -328,7 +328,7 @@ int NESTGPU::_RemoteSingleConnect<int> int i_target, float weight, float delay, int i_array, SynSpec &syn_spec) { - + RemoteConnection rc = {i_source, i_target0 + i_target, syn_spec.port_, syn_spec.syn_group_, weight, delay}; @@ -424,7 +424,7 @@ int NESTGPU::RemoteConnect(int i_source_host, int i_source, int n_source, { #ifdef HAVE_MPI RemoteNode<int> rsource(i_source_host, i_source); - RemoteNode<int*> rtarget(i_target_host, target); + RemoteNode<int*> rtarget(i_target_host, target); return _RemoteConnect<int, int*>(rsource, n_source, rtarget, n_target, conn_spec, syn_spec); #else @@ -467,7 +467,7 @@ int NESTGPU::RemoteConnect(int i_source_host, NodeSeq source, #ifdef HAVE_MPI RemoteNode<int> rsource(i_source_host, source.i0); RemoteNode<int> rtarget(i_target_host, target.i0); - + return _RemoteConnect<int, int>(rsource, source.n, rtarget, target.n, conn_spec, syn_spec); #else diff --git a/src/connect_rules.h b/src/connect_rules.h index 9cb1f9b31..0c558bbfe 100644 --- a/src/connect_rules.h +++ b/src/connect_rules.h @@ -64,7 
+64,7 @@ int NESTGPU::_Connect(T1 source, int n_source, T2 target, int n_target, // if (syn_spec.delay_distr_ != NULL) { // syn_spec.delay_array_ = Distribution(syn_spec.delay_distr, n); // } - + switch (conn_spec.rule_) { case ONE_TO_ONE: if (n_source != n_target) { @@ -160,7 +160,7 @@ int NESTGPU::_RemoteSingleConnect(int i_source, T target, int i_target, template <class T1, class T2> int NESTGPU::_ConnectOneToOne(T1 source, T2 target, int n_node, - SynSpec &syn_spec) + SynSpec &syn_spec) { for (int in=0; in<n_node; in++) { _SingleConnect<T1, T2>(source, in, target, in, in, syn_spec); @@ -195,7 +195,7 @@ int NESTGPU::_ConnectFixedTotalNumber _SingleConnect<T1, T2>(source, isn, target, itn, i_conn, syn_spec); } delete[] rnd; - + return 0; } @@ -213,8 +213,8 @@ int NESTGPU::_ConnectFixedIndegree } int n_rnd = indegree*THREAD_MAXNUM; if (n_source>=method_thresh*indegree) { // nuovo metodo - n_rnd *= 5; - } + n_rnd *= 5; + } unsigned int *rnd = RandomInt(n_rnd); for (int k=0; k<n_target; k+=THREAD_MAXNUM) { @@ -250,7 +250,7 @@ int NESTGPU::_ConnectFixedIndegree iter = std::lower_bound(sorted_vect.begin(), sorted_vect.end(), j); i1++; - } while (iter != sorted_vect.end() && *iter == j); // we found j + } while (iter != sorted_vect.end() && *iter == j); // we found j sorted_vect.insert(iter, j); int_vect.push_back(j); } @@ -264,7 +264,7 @@ int NESTGPU::_ConnectFixedIndegree } } delete[] rnd; - + return 0; } @@ -282,8 +282,8 @@ int NESTGPU::_ConnectFixedOutdegree } int n_rnd = outdegree*THREAD_MAXNUM; if (n_target>=method_thresh*outdegree) { // choose method - n_rnd *= 5; - } + n_rnd *= 5; + } unsigned int *rnd = RandomInt(n_rnd); @@ -319,11 +319,11 @@ int NESTGPU::_ConnectFixedOutdegree iter = std::lower_bound(sorted_vect.begin(), sorted_vect.end(), j); i1++; - } while (iter != sorted_vect.end() && *iter == j); // we found j + } while (iter != sorted_vect.end() && *iter == j); // we found j sorted_vect.insert(iter, j); int_vect.push_back(j); } - } + } for (int k=0; k<outdegree; k++) { int itn = int_vect[k]; size_t i_array = (size_t)isn*outdegree + k; @@ -333,7 +333,7 @@ int NESTGPU::_ConnectFixedOutdegree } } delete[] rnd; - + return 0; } @@ -468,7 +468,7 @@ template <class T1, class T2> connect_mpi_->MPI_Recv_int(&i_new_remote_node, 1, target.i_host_); for (int k=0; k<n_target; k++) { for (int i=0; i<n_source; i++) { - int i_source_node = source.GetINode(i); + int i_source_node = source.GetINode(i); int i_remote_node = -1; for (std::vector<ExternalConnectionNode >::iterator it = connect_mpi_->extern_connection_[i_source_node].begin(); @@ -515,7 +515,7 @@ template <class T1, class T2> connect_mpi_->MPI_Send_int(&n_remote_node_, 1, source.i_host_); connect_mpi_->MPI_Recv_int(&n_remote_node_, 1, source.i_host_); connect_mpi_->MPI_Recv_int(i_remote_node_arr, n_conn, source.i_host_); - + for (int i_conn=0; i_conn<n_conn; i_conn++) { int i_remote_node = i_remote_node_arr[i_conn]; int itn = rnd[2*i_conn+1] % n_target; @@ -528,7 +528,7 @@ template <class T1, class T2> connect_mpi_->MPI_Recv_int(&i_new_remote_node, 1, target.i_host_); for (int i_conn=0; i_conn<n_conn; i_conn++) { int isn = rnd[2*i_conn] % n_source; - int i_source_node = source.GetINode(isn); + int i_source_node = source.GetINode(isn); int i_remote_node = -1; for (std::vector<ExternalConnectionNode >::iterator it = connect_mpi_->extern_connection_[i_source_node].begin(); @@ -586,7 +586,7 @@ template <class T1, class T2> size_t i_array = (size_t)k*indegree + i; _RemoteSingleConnect<T2>(i_remote_node, target.i_node_, k, i_array, 
syn_spec); - + } } } @@ -595,8 +595,8 @@ template <class T1, class T2> connect_mpi_->MPI_Recv_int(&i_new_remote_node, 1, target.i_host_); int n_rnd = indegree; if (n_source>=method_thresh*indegree) { // choose method - n_rnd *= 5; - } + n_rnd *= 5; + } unsigned int *rnd = RandomInt(n_rnd); //std::vector<int> input_array; @@ -632,7 +632,7 @@ template <class T1, class T2> iter = std::lower_bound(sorted_vect.begin(), sorted_vect.end(), j); i1++; - } while (iter != sorted_vect.end() && *iter == j); // we found j + } while (iter != sorted_vect.end() && *iter == j); // we found j sorted_vect.insert(iter, j); int_vect.push_back(j); } @@ -695,7 +695,7 @@ template <class T1, class T2> int n_rnd = outdegree; if (n_target>=method_thresh*outdegree) { // choose method - n_rnd *= 5; + n_rnd *= 5; } unsigned int *rnd = RandomInt(n_rnd); @@ -727,11 +727,11 @@ template <class T1, class T2> iter = std::lower_bound(sorted_vect.begin(), sorted_vect.end(), j); i1++; - } while (iter != sorted_vect.end() && *iter == j); // we found j + } while (iter != sorted_vect.end() && *iter == j); // we found j sorted_vect.insert(iter, j); int_vect.push_back(j); } - } + } for (int k=0; k<outdegree; k++) { int i_remote_node = i_remote_node_arr[isn]; int itn = int_vect[k]; diff --git a/src/cuda_error.h b/src/cuda_error.h index dfe0be6de..6b94ba01b 100644 --- a/src/cuda_error.h +++ b/src/cuda_error.h @@ -32,7 +32,7 @@ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { - if (code != cudaSuccess) + if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) throw ngpu_exception("CUDA error"); diff --git a/src/ext_neuron.cu b/src/ext_neuron.cu index fcb98d426..77e5c57db 100644 --- a/src/ext_neuron.cu +++ b/src/ext_neuron.cu @@ -43,7 +43,7 @@ __global__ void UpdateExtNeuron(float *port_input_pt, float *port_value_pt, //printf("port %d node %d pip %f\n", i_port, i_node, *pip); port_value_pt[i_node*n_var + n_port_var*i_port] = *pip; - *pip = 0.0; + *pip = 0.0; } } @@ -92,7 +92,7 @@ int ext_neuron::Init(int i_node_0, int n_node, int n_port, } SetPortParam(0, n_node, "port_weight", port_weight_vect_.data(), n_port); SetPortVar(0, n_node, "port_input", port_input_vect_.data(), n_port); - + return 0; } @@ -100,19 +100,19 @@ int ext_neuron::Update(long long it, double t1) { // std::cout << "Ext neuron update\n"; float *port_input_pt = GetVarPt(0, "port_input", 0); float *port_value_pt = GetVarPt(0, "port_value", 0); - + UpdateExtNeuron<<<(n_node_*n_port_+1023)/1024, 1024>>> (port_input_pt, port_value_pt, n_node_, n_var_, n_port_var_, n_port_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int ext_neuron::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } @@ -126,6 +126,6 @@ float *ext_neuron::GetExtNeuronInputSpikes(int *n_node, int *n_port) float *var_arr = GetPortVar(0, n_node_, "port_value"); ext_neuron_input_spikes_.assign(var_arr, var_arr+n_node_*n_port_); free(var_arr); - + return ext_neuron_input_spikes_.data(); } diff --git a/src/ext_neuron.h b/src/ext_neuron.h index 98eb37d91..3652ffb39 100644 --- a/src/ext_neuron.h +++ b/src/ext_neuron.h @@ -81,13 +81,13 @@ class ext_neuron : public BaseNeuron unsigned long long *seed); //int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); int Free(); float *GetExtNeuronInputSpikes(int *n_node, int *n_port); - + }; diff --git a/src/get_spike.cu b/src/get_spike.cu 
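The _ConnectFixedIndegree and _ConnectFixedOutdegree hunks above (and their remote variants) reject duplicate draws by keeping the already-chosen indices in a sorted vector and probing it with std::lower_bound, which also yields the insertion point. A self-contained host-side sketch of that idiom follows; the library draws its random numbers on the GPU through RandomInt(), replaced here by std::mt19937, and sample_distinct is a made-up name.

```cpp
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

// Draw k distinct indices from [0, n_source). sorted_vect stays sorted so the
// membership test and the insertion position come from a single lower_bound
// call, matching the loop structure in connect_rules.h. Assumes k <= n_source.
std::vector<int> sample_distinct(int n_source, int k, std::mt19937& rng)
{
    std::uniform_int_distribution<int> pick(0, n_source - 1);
    std::vector<int> sorted_vect;  // already-drawn indices, kept sorted
    std::vector<int> int_vect;     // draw order, like int_vect in the diff
    while (static_cast<int>(int_vect.size()) < k) {
        int j = pick(rng);
        auto iter = std::lower_bound(sorted_vect.begin(), sorted_vect.end(), j);
        if (iter != sorted_vect.end() && *iter == j) {
            continue;  // j was drawn before: reject and redraw
        }
        sorted_vect.insert(iter, j);
        int_vect.push_back(j);
    }
    return int_vect;
}

int main()
{
    std::mt19937 rng(12345);
    for (int j : sample_distinct(100, 10, rng)) std::printf("%d ", j);
    std::printf("\n");
    return 0;
}
```

The sorted vector gives a logarithmic membership test per draw, while the separate int_vect preserves the draw order that the connection loops use to index the per-connection weight and delay arrays (i_array).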
index 7acc5050c..8b40e90e3 100644 --- a/src/get_spike.cu +++ b/src/get_spike.cu @@ -47,8 +47,8 @@ __device__ double atomicAddDouble(double* address, double val) unsigned long long int old = *address_as_ull, assumed; do { assumed = old; - old = atomicCAS(address_as_ull, assumed, - __double_as_longlong(val + + old = atomicCAS(address_as_ull, assumed, + __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); @@ -74,7 +74,7 @@ __device__ void CollectSpikeFunction(int i_spike, int i_syn) //" port %d weight %f\n", //i_spike, i_source, i_conn, i_syn, i_target, //port, weight); - + ///////////////////////////////////////////////////////////////// int i_group=NodeGroupMap[i_target]; int i = port*NodeGroupArray[i_group].n_node_ + i_target @@ -85,7 +85,7 @@ __device__ void CollectSpikeFunction(int i_spike, int i_syn) if (syn_group>0) { ConnectionGroupTargetSpikeTime[i_conn*NSpikeBuffer+i_source][i_syn] = (unsigned short)(NESTGPUTimeIdx & 0xffff); - + long long Dt_int = NESTGPUTimeIdx - LastRevSpikeTimeIdx[i_target]; if (Dt_int>0 && Dt_int<MAX_SYN_DT) { SynapseUpdate(syn_group, &ConnectionGroupTargetWeight @@ -160,7 +160,7 @@ __global__ void GetSpikes(double *spike_array, int array_size, int n_port, double d_val = (double)port_input_arr[port_input] + spike_array[i_array] * port_weight_arr[port_weight]; - + port_input_arr[port_input] = (float)d_val; } } @@ -175,7 +175,7 @@ int NESTGPU::ClearGetSpikeArrays() *sizeof(double))); } } - + return 0; } @@ -187,6 +187,6 @@ int NESTGPU::FreeGetSpikeArrays() gpuErrchk(cudaFree(bn->get_spike_array_)); } } - + return 0; } diff --git a/src/iaf_psc_alpha.cu b/src/iaf_psc_alpha.cu index 5e2036716..7e21764b7 100644 --- a/src/iaf_psc_alpha.cu +++ b/src/iaf_psc_alpha.cu @@ -81,7 +81,7 @@ __global__ void iaf_psc_alpha_Calibrate(int n_node, float *param_arr, int i_neuron = threadIdx.x + blockIdx.x * blockDim.x; if (i_neuron<n_node) { float *param = param_arr + n_param*i_neuron; - + P11ex = P22ex = exp( -h / tau_ex ); P11in = P22in = exp( -h / tau_in ); P33 = exp( -h / tau_m ); @@ -119,7 +119,7 @@ __global__ void iaf_psc_alpha_Update(int n_node, int i_node_0, float *var_arr, V_m_rel = P30 * I_e + P31ex * dI_ex + P32ex * I_ex + P31in * dI_in + P32in * I_in + expm1_tau_m * V_m_rel + V_m_rel; } - + // alpha shape PSCs I_ex = P21ex * dI_ex + P22ex * I_ex; dI_ex *= P11ex; @@ -151,7 +151,7 @@ int iaf_psc_alpha::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -189,7 +189,7 @@ int iaf_psc_alpha::Init(int i_node_0, int n_node, int /*n_port*/, SetScalVar(0, n_node, "dI_in", 0.0 ); SetScalVar(0, n_node, "V_m_rel", -70.0 - (-70.0) ); // in mV, relative to E_L SetScalVar(0, n_node, "refractory_step", 0 ); - + port_weight_arr_ = GetParamArr() + GetScalParamIdx("EPSCInitialValue"); port_weight_arr_step_ = n_param_; port_weight_port_step_ = 1; @@ -199,7 +199,7 @@ int iaf_psc_alpha::Init(int i_node_0, int n_node, int /*n_port*/, port_input_port_step_ = 1; den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay"); - + return 0; } @@ -209,15 +209,15 @@ int iaf_psc_alpha::Update(long long it, double t1) iaf_psc_alpha_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); // gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int iaf_psc_alpha::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/iaf_psc_alpha.h 
b/src/iaf_psc_alpha.h index e49393c2e..3a5f3eecf 100644 --- a/src/iaf_psc_alpha.h +++ b/src/iaf_psc_alpha.h @@ -106,7 +106,7 @@ References DOI: https://doi.org/10.1007/s004220050570 .. [2] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking - network model. Cerebral Cortex. 24(3):785–806. + network model. Cerebral Cortex. 24(3):785–806. DOI: https://doi.org/10.1093/cercor/bhs358. See also @@ -162,7 +162,7 @@ enum ScalParamIndexes { N_SCAL_PARAM }; - + const std::string iaf_psc_alpha_scal_var_name[N_SCAL_VAR] = { "I_syn_ex", "I_syn_in", @@ -202,17 +202,17 @@ const std::string iaf_psc_alpha_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class iaf_psc_alpha : public BaseNeuron { public: ~iaf_psc_alpha(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double, float time_resolution); - + int Update(long long it, double t1); int Free(); diff --git a/src/iaf_psc_exp.cu b/src/iaf_psc_exp.cu index 38028184f..7ae97dcd5 100644 --- a/src/iaf_psc_exp.cu +++ b/src/iaf_psc_exp.cu @@ -71,12 +71,12 @@ __global__ void iaf_psc_exp_Calibrate(int n_node, float *param_arr, int i_neuron = threadIdx.x + blockIdx.x * blockDim.x; if (i_neuron<n_node) { float *param = param_arr + n_param*i_neuron; - + P11ex = exp( -h / tau_ex ); P11in = exp( -h / tau_in ); P22 = exp( -h / tau_m ); P21ex = (float)propagator_32( tau_ex, tau_m, C_m, h ); - P21in = (float)propagator_32( tau_in, tau_m, C_m, h ); + P21in = (float)propagator_32( tau_in, tau_m, C_m, h ); P20 = tau_m / C_m * ( 1.0 - P22 ); } } @@ -89,7 +89,7 @@ __global__ void iaf_psc_exp_Update(int n_node, int i_node_0, float *var_arr, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -100,12 +100,12 @@ __global__ void iaf_psc_exp_Update(int n_node, int i_node_0, float *var_arr, // exponential decaying PSCs I_syn_ex *= P11ex; I_syn_in *= P11in; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = (int)round(t_ref/NESTGPUTimeResolution); - } + } } } @@ -125,7 +125,7 @@ int iaf_psc_exp::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -163,14 +163,14 @@ int iaf_psc_exp::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn_ex, I_syn_in port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn_ex"); port_input_arr_step_ = n_var_; port_input_port_step_ = 1; den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay"); - + return 0; } @@ -180,15 +180,15 @@ int iaf_psc_exp::Update(long long it, double t1) iaf_psc_exp_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); // gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int iaf_psc_exp::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/iaf_psc_exp.h b/src/iaf_psc_exp.h index 15ee5a769..e91f28900 100644 --- a/src/iaf_psc_exp.h +++ b/src/iaf_psc_exp.h @@ -50,7 +50,7 @@ Description +++++++++++ iaf_psc_exp is an implementation of a leaky integrate-and-fire model -with exponential shaped postsynaptic currents (PSCs) according 
to +with exponential shaped postsynaptic currents (PSCs) according to equations 1, 2, 4 and 5 of [1]_ and equation 3 of [2]_. Thus, postsynaptic currents have an infinitely short rise time. @@ -109,7 +109,7 @@ References DOI: https://doi.org/10.1007/s004220050570 .. [4] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking - network model. Cerebral Cortex. 24(3):785–806. + network model. Cerebral Cortex. 24(3):785–806. DOI: https://doi.org/10.1093/cercor/bhs358. See also @@ -154,7 +154,7 @@ enum ScalParamIndexes { N_SCAL_PARAM }; - + const std::string iaf_psc_exp_scal_var_name[N_SCAL_VAR] = { "I_syn_ex", "I_syn_in", @@ -185,17 +185,17 @@ const std::string iaf_psc_exp_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class iaf_psc_exp : public BaseNeuron { public: ~iaf_psc_exp(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double, float time_resolution); - + int Update(long long it, double t1); int Free(); diff --git a/src/iaf_psc_exp_g.cu b/src/iaf_psc_exp_g.cu index 598f188b9..5432c1309 100644 --- a/src/iaf_psc_exp_g.cu +++ b/src/iaf_psc_exp_g.cu @@ -56,7 +56,7 @@ __global__ void iaf_psc_exp_g_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -66,12 +66,12 @@ __global__ void iaf_psc_exp_g_Update } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -114,7 +114,7 @@ int iaf_psc_exp_g::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -144,7 +144,7 @@ int iaf_psc_exp_g::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -167,15 +167,15 @@ int iaf_psc_exp_g::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, Theta_rel_, V_reset_rel_, n_refractory_steps, P11, P22, P21, P20 ); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int iaf_psc_exp_g::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/iaf_psc_exp_g.h b/src/iaf_psc_exp_g.h index 37c290f3e..843285dab 100644 --- a/src/iaf_psc_exp_g.h +++ b/src/iaf_psc_exp_g.h @@ -50,7 +50,7 @@ Description +++++++++++ iaf_psc_exp_g is an implementation of a leaky integrate-and-fire model -with exponential shaped postsynaptic currents (PSCs) according to +with exponential shaped postsynaptic currents (PSCs) according to equations 1, 2, 4 and 5 of [1]_ and equation 3 of [2]_. Thus, postsynaptic currents have an infinitely short rise time. @@ -116,7 +116,7 @@ References DOI: https://doi.org/10.1007/s004220050570 .. [4] Potjans TC. and Diesmann M. 2014. The cell-type specific cortical microcircuit: relating structure and activity in a full-scale spiking - network model. Cerebral Cortex. 24(3):785–806. + network model. Cerebral Cortex. 24(3):785–806. DOI: https://doi.org/10.1093/cercor/bhs358. 
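The iaf_psc_exp and iaf_psc_exp_g hunks above only reflow whitespace around the exact-integration update, so the role of the propagators computed in Calibrate is easy to miss. Below is a hedged sketch of one integration step using those propagators, assuming tau_syn != tau_m; the closed form used for P21 is the textbook cross propagator, whereas the library obtains it from propagator_32(), whose degenerate-case handling is not reproduced. The threshold test, reset, and refractory counter from the Update kernels are omitted, and all numeric values are illustrative.

```cpp
#include <cmath>
#include <cstdio>

// One exact-integration step for a leaky membrane driven by an exponentially
// decaying synaptic current, following the propagator pattern visible in
// iaf_psc_exp_Calibrate / iaf_psc_exp_Update.
struct State { float V_m_rel; float I_syn; };

State step(State s, float h, float tau_m, float tau_syn, float C_m, float I_e)
{
    float P22 = std::exp(-h / tau_m);    // membrane decay
    float P11 = std::exp(-h / tau_syn);  // synaptic-current decay
    float P20 = tau_m / C_m * (1.0f - P22);
    // Cross propagator (sketch): valid only for tau_syn != tau_m.
    float P21 = tau_m * tau_syn / (C_m * (tau_m - tau_syn))
                * (std::exp(-h / tau_m) - std::exp(-h / tau_syn));
    State out;
    out.V_m_rel = P20 * I_e + P21 * s.I_syn + P22 * s.V_m_rel;
    out.I_syn   = P11 * s.I_syn;  // exponential PSC decay, as in the kernels
    return out;
}

int main()
{
    State s{0.0f, 100.0f};  // relative V_m and a 100 pA synaptic current
    for (int i = 0; i < 5; ++i) {
        s = step(s, 0.1f, 10.0f, 2.0f, 250.0f, 0.0f);
        std::printf("step %d: V_m_rel = %g mV, I_syn = %g pA\n",
                    i, s.V_m_rel, s.I_syn);
    }
    return 0;
}
```

With tau_m = 10 ms, tau_syn = 2 ms, C_m = 250 pF and h = 0.1 ms this gives P22 ≈ 0.990 and P11 ≈ 0.951, so both the membrane potential and the postsynaptic current decay slightly at every step between incoming spikes.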
See also @@ -154,7 +154,7 @@ enum GroupParamIndexes { }; - + const std::string iaf_psc_exp_g_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m_rel", @@ -174,9 +174,9 @@ const std::string iaf_psc_exp_g_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -186,14 +186,14 @@ class iaf_psc_exp_g : public BaseNeuron public: ~iaf_psc_exp_g(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/iaf_psc_exp_hc.cu b/src/iaf_psc_exp_hc.cu index dbabfcbdd..fd481a682 100644 --- a/src/iaf_psc_exp_hc.cu +++ b/src/iaf_psc_exp_hc.cu @@ -49,7 +49,7 @@ __global__ void iaf_psc_exp_hc_Update(int n_node, int i_node_0, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -59,12 +59,12 @@ __global__ void iaf_psc_exp_hc_Update(int n_node, int i_node_0, } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -84,7 +84,7 @@ int iaf_psc_exp_hc::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -104,7 +104,7 @@ int iaf_psc_exp_hc::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -119,14 +119,14 @@ int iaf_psc_exp_hc::Update(long long it, double t1) iaf_psc_exp_hc_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int iaf_psc_exp_hc::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/iaf_psc_exp_hc.h b/src/iaf_psc_exp_hc.h index 75899907b..13c05a99f 100644 --- a/src/iaf_psc_exp_hc.h +++ b/src/iaf_psc_exp_hc.h @@ -64,13 +64,13 @@ const std::string iaf_psc_exp_hc_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class iaf_psc_exp_hc : public BaseNeuron { public: ~iaf_psc_exp_hc(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); diff --git a/src/izhikevich.cu b/src/izhikevich.cu index e7ace088d..006943491 100644 --- a/src/izhikevich.cu +++ b/src/izhikevich.cu @@ -57,7 +57,7 @@ __global__ void izhikevich_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -71,7 +71,7 @@ __global__ void izhikevich_Update u += h*a*(b*v_old - u_old); } I_syn = 0; - + if ( V_m >= V_th ) { // send spike PushSpike(i_node_0 + i_neuron, 1.0); V_m = c; @@ -102,7 +102,7 @@ int izhikevich::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -113,7 +113,7 @@ int izhikevich::Init(int i_node_0, int n_node, int /*n_port*/, SetScalParam(0, n_node, "I_e", 0.0 ); // in pA SetScalParam(0, 
n_node, "den_delay", 0.0 ); // in ms - + SetScalVar(0, n_node, "I_syn", 0.0 ); SetScalVar(0, n_node, "V_m", -70.0 ); // in mV SetScalVar(0, n_node, "u", -70.0*0.2 ); @@ -133,7 +133,7 @@ int izhikevich::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -152,15 +152,15 @@ int izhikevich::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, V_th_, a_, b_, c_, d_, n_refractory_steps, h); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int izhikevich::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/izhikevich.h b/src/izhikevich.h index b2017813d..78d314603 100644 --- a/src/izhikevich.h +++ b/src/izhikevich.h @@ -119,7 +119,7 @@ enum GroupParamIndexes { }; - + const std::string izhikevich_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m", @@ -140,9 +140,9 @@ const std::string izhikevich_group_param_name[N_GROUP_PARAM] = { "d", "t_ref" }; - + } // namespace - + @@ -152,14 +152,14 @@ class izhikevich : public BaseNeuron public: ~izhikevich(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/izhikevich_cond_beta.cu b/src/izhikevich_cond_beta.cu index f0ec6e389..d1ba0da22 100644 --- a/src/izhikevich_cond_beta.cu +++ b/src/izhikevich_cond_beta.cu @@ -49,7 +49,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, I_e = 0.0; t_ref = 0.0; den_delay = 0.0; - + V_m = -70.0; u = b*V_m; refractory_step = 0; @@ -94,7 +94,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, izhikevich_cond_beta_rk5 data_struct) @@ -116,14 +116,14 @@ using namespace izhikevich_cond_beta_ns; int izhikevich_cond_beta::Init(int i_node_0, int n_node, int n_port, int i_group, unsigned long long *seed) { BaseNeuron::Init(i_node_0, n_node, n_port, i_group, seed); - + node_type_ = i_izhikevich_cond_beta_model; n_scal_var_ = N_SCAL_VAR; n_port_var_ = N_PORT_VAR; n_scal_param_ = N_SCAL_PARAM; n_port_param_ = N_PORT_PARAM; n_group_param_ = N_GROUP_PARAM; - + n_var_ = n_scal_var_ + n_port_var_*n_port; n_param_ = n_scal_param_ + n_port_param_*n_port; @@ -164,7 +164,7 @@ int izhikevich_cond_beta::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/izhikevich_cond_beta.h b/src/izhikevich_cond_beta.h index d3d27f8d5..6cf610733 100644 --- a/src/izhikevich_cond_beta.h +++ b/src/izhikevich_cond_beta.h @@ -64,9 +64,9 @@ The dynamics are given by: &v \text{ jumps on each spike arrival by the weight of the spike} This implementation uses the standard technique for forward Euler integration. -This model is multisynapse, so it allows an arbitrary number of synaptic -rise time and decay time constants. The number of receptor ports must be specified -at neuron creation (default value is 1) and the receptor index starts from 0 +This model is multisynapse, so it allows an arbitrary number of synaptic +rise time and decay time constants. 
The number of receptor ports must be specified +at neuron creation (default value is 1) and the receptor index starts from 0 (and not from 1 as in NEST multisynapse models). The time constants are supplied by by two arrays, ``tau_rise`` and ``tau_decay`` for the synaptic rise time and decay time, respectively. The synaptic @@ -95,7 +95,7 @@ The following parameters can be set in the status dictionary. tau_decay ms Decay time constant of synaptic conductance h_min_rel real Starting step in ODE integration relative to time resolution - h0_rel real Minimum step in ODE integration relative to + h0_rel real Minimum step in ODE integration relative to time resolution ======================= ======= ============================================== @@ -131,22 +131,22 @@ class izhikevich_cond_beta : public BaseNeuron float h_min_; float h_; izhikevich_cond_beta_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/izhikevich_cond_beta_kernel.h b/src/izhikevich_cond_beta_kernel.h index 3b5f22650..c3839c018 100644 --- a/src/izhikevich_cond_beta_kernel.h +++ b/src/izhikevich_cond_beta_kernel.h @@ -78,7 +78,7 @@ enum GroupParamIndexes { N_GROUP_PARAM }; - + const std::string izhikevich_cond_beta_scal_var_name[N_SCAL_VAR] = { "V_m", "u" @@ -105,7 +105,7 @@ const std::string izhikevich_cond_beta_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; @@ -164,7 +164,7 @@ __device__ dVdt = ( refractory_step > 0 ) ? 
0 : 0.04 * V * V + 5.0 * V + 140.0 - u + I_syn + I_e; - + dudt = a*(b*V - u); for (int i=0; i<n_port; i++) { diff --git a/src/izhikevich_psc_exp.cu b/src/izhikevich_psc_exp.cu index 03c29404a..a1aaceea5 100644 --- a/src/izhikevich_psc_exp.cu +++ b/src/izhikevich_psc_exp.cu @@ -58,7 +58,7 @@ __global__ void izhikevich_psc_exp_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -73,7 +73,7 @@ __global__ void izhikevich_psc_exp_Update } // exponential decaying PSC I_syn *= C_syn; - + if ( V_m >= V_th ) { // send spike PushSpike(i_node_0 + i_neuron, 1.0); V_m = c; @@ -104,7 +104,7 @@ int izhikevich_psc_exp::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -115,7 +115,7 @@ int izhikevich_psc_exp::Init(int i_node_0, int n_node, int /*n_port*/, SetScalParam(0, n_node, "I_e", 0.0 ); // in pA SetScalParam(0, n_node, "den_delay", 0.0 ); // in ms - + SetScalVar(0, n_node, "I_syn", 0.0 ); SetScalVar(0, n_node, "V_m", -70.0 ); // in mV SetScalVar(0, n_node, "u", -70.0*0.2 ); @@ -136,7 +136,7 @@ int izhikevich_psc_exp::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -156,15 +156,15 @@ int izhikevich_psc_exp::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, V_th_, a_, b_, c_, d_, n_refractory_steps, h, C_syn); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int izhikevich_psc_exp::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/izhikevich_psc_exp.h b/src/izhikevich_psc_exp.h index a6bb98542..b159b6c16 100644 --- a/src/izhikevich_psc_exp.h +++ b/src/izhikevich_psc_exp.h @@ -124,7 +124,7 @@ enum GroupParamIndexes { }; - + const std::string izhikevich_psc_exp_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m", @@ -146,9 +146,9 @@ const std::string izhikevich_psc_exp_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -158,14 +158,14 @@ class izhikevich_psc_exp : public BaseNeuron public: ~izhikevich_psc_exp(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/izhikevich_psc_exp_2s.cu b/src/izhikevich_psc_exp_2s.cu index 85f5ef49c..b2419b80c 100644 --- a/src/izhikevich_psc_exp_2s.cu +++ b/src/izhikevich_psc_exp_2s.cu @@ -59,11 +59,11 @@ __global__ void izhikevich_psc_exp_2s_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; - + for (int i=0; i<INTEGR_STEPS; i++) { // exponential decaying PSC I_syn *= C_syn; @@ -111,7 +111,7 @@ int izhikevich_psc_exp_2s::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -122,7 
+122,7 @@ int izhikevich_psc_exp_2s::Init(int i_node_0, int n_node, int /*n_port*/, SetScalParam(0, n_node, "I_e", 0.0 ); // in pA SetScalParam(0, n_node, "den_delay", 0.0 ); // in ms - + SetScalVar(0, n_node, "I_syn", 0.0 ); SetScalVar(0, n_node, "V_m", -70.0 ); // in mV SetScalVar(0, n_node, "u", -70.0*0.2 ); @@ -143,7 +143,7 @@ int izhikevich_psc_exp_2s::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -163,15 +163,15 @@ int izhikevich_psc_exp_2s::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, V_th_, a_, b_, c_, d_, n_refractory_steps, h, C_syn); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int izhikevich_psc_exp_2s::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/izhikevich_psc_exp_2s.h b/src/izhikevich_psc_exp_2s.h index 75b3c5d06..83ce8b929 100644 --- a/src/izhikevich_psc_exp_2s.h +++ b/src/izhikevich_psc_exp_2s.h @@ -64,7 +64,7 @@ enum GroupParamIndexes { }; - + const std::string izhikevich_psc_exp_2s_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m", @@ -86,9 +86,9 @@ const std::string izhikevich_psc_exp_2s_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class izhikevich_psc_exp_2s : public BaseNeuron public: ~izhikevich_psc_exp_2s(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/izhikevich_psc_exp_5s.cu b/src/izhikevich_psc_exp_5s.cu index c59827a84..409e49690 100644 --- a/src/izhikevich_psc_exp_5s.cu +++ b/src/izhikevich_psc_exp_5s.cu @@ -59,11 +59,11 @@ __global__ void izhikevich_psc_exp_5s_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; - + for (int i=0; i<INTEGR_STEPS; i++) { // exponential decaying PSC I_syn *= C_syn; @@ -111,7 +111,7 @@ int izhikevich_psc_exp_5s::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -122,7 +122,7 @@ int izhikevich_psc_exp_5s::Init(int i_node_0, int n_node, int /*n_port*/, SetScalParam(0, n_node, "I_e", 0.0 ); // in pA SetScalParam(0, n_node, "den_delay", 0.0 ); // in ms - + SetScalVar(0, n_node, "I_syn", 0.0 ); SetScalVar(0, n_node, "V_m", -70.0 ); // in mV SetScalVar(0, n_node, "u", -70.0*0.2 ); @@ -143,7 +143,7 @@ int izhikevich_psc_exp_5s::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -163,15 +163,15 @@ int izhikevich_psc_exp_5s::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, V_th_, a_, b_, c_, d_, n_refractory_steps, h, C_syn); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int izhikevich_psc_exp_5s::Free() { - FreeVarArr(); + FreeVarArr(); 
FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/izhikevich_psc_exp_5s.h b/src/izhikevich_psc_exp_5s.h index 43957dcf1..6cd85a090 100644 --- a/src/izhikevich_psc_exp_5s.h +++ b/src/izhikevich_psc_exp_5s.h @@ -64,7 +64,7 @@ enum GroupParamIndexes { }; - + const std::string izhikevich_psc_exp_5s_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m", @@ -86,9 +86,9 @@ const std::string izhikevich_psc_exp_5s_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class izhikevich_psc_exp_5s : public BaseNeuron public: ~izhikevich_psc_exp_5s(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/multimeter.cu b/src/multimeter.cu index 6989f8130..1787873d2 100644 --- a/src/multimeter.cu +++ b/src/multimeter.cu @@ -62,14 +62,14 @@ Record::Record(std::vector<BaseNeuron*> neur_vect, std::string file_name, int Record::OpenFile() { fp_=fopen(file_name_.c_str(), "w"); - + return 0; } int Record::CloseFile() { fclose(fp_); - + return 0; } @@ -77,7 +77,7 @@ int Record::WriteRecord(float t) { float var; std::vector<float> vect; - + if (out_file_flag_) { fprintf(fp_,"%f", t); } @@ -129,27 +129,27 @@ int Multimeter::OpenFiles() record_vect_[i].OpenFile(); } } - + return 0; } int Multimeter::CloseFiles() -{ +{ for (unsigned int i=0; i<record_vect_.size(); i++) { if (record_vect_[i].out_file_flag_) { record_vect_[i].CloseFile(); } } - + return 0; } int Multimeter::WriteRecords(float t) -{ +{ for (unsigned int i=0; i<record_vect_.size(); i++) { record_vect_[i].WriteRecord(t); } - + return 0; } @@ -158,6 +158,6 @@ std::vector<std::vector<float> > *Multimeter::GetRecordData(int i_record) if (i_record<0 || i_record>=(int)record_vect_.size()) { throw ngpu_exception("Record does not exist."); } - + return &record_vect_[i_record].data_vect_; } diff --git a/src/multimeter.h b/src/multimeter.h index eabc060e1..ed65ac7be 100644 --- a/src/multimeter.h +++ b/src/multimeter.h @@ -84,7 +84,7 @@ can also be retreived through the commands ``GetRecordDataRows`` and print("recorder has {} rows and {} columns".format(rows, columns)) recorded_data = nestgpu.GetRecordData(record) - + time = [row[0] for row in recorded_data] variable = [row[1] for row in recorded_data] @@ -115,13 +115,13 @@ class Record std::vector<int> i_neur_vect, std::vector<int> port_vect); int OpenFile(); - + int CloseFile(); - + int WriteRecord(float t); }; - + class Multimeter { public: @@ -139,7 +139,7 @@ class Multimeter int WriteRecords(float t); std::vector<std::vector<float> > *GetRecordData(int i_record); - + }; #endif diff --git a/src/nestgpu.cu b/src/nestgpu.cu index 8124a912f..be81cb3ab 100644 --- a/src/nestgpu.cu +++ b/src/nestgpu.cu @@ -106,8 +106,8 @@ NESTGPU::NESTGPU() poiss_generator_ = new PoissonGenerator; multimeter_ = new Multimeter; net_connection_ = new NetConnection; - - + + calibrate_flag_ = false; start_real_time_ = getRealTime(); @@ -119,23 +119,23 @@ NESTGPU::NESTGPU() SetTimeResolution(0.1); // time resolution in ms max_spike_num_fact_ = 1.0; max_spike_per_host_fact_ = 1.0; - + error_flag_ = false; error_message_ = ""; error_code_ = 0; - + on_exception_ = ON_EXCEPTION_EXIT; verbosity_level_ = 4; print_time_ = false; - + mpi_flag_ = false; #ifdef HAVE_MPI connect_mpi_ = new ConnectMpi; connect_mpi_->net_connection_ = net_connection_; connect_mpi_->remote_spike_height_ = 
false; #endif - + SetRandomSeed(54321ULL); SpikeBufferUpdate_time_ = 0; @@ -199,14 +199,14 @@ int NESTGPU::SetTimeResolution(float time_res) { time_resolution_ = time_res; net_connection_->time_resolution_ = time_res; - + return 0; } int NESTGPU::SetMaxSpikeBufferSize(int max_size) { max_spike_buffer_size_ = max_size; - + return 0; } @@ -240,7 +240,7 @@ int NESTGPU::CreateNodeGroup(int n_node, int n_port) } int i_group = node_vect_.size() - 1; node_group_map_.insert(node_group_map_.end(), n_node, i_group); - + std::vector<ConnGroup> conn; std::vector<std::vector<ConnGroup> >::iterator it = net_connection_->connection_.end(); @@ -252,10 +252,10 @@ int NESTGPU::CreateNodeGroup(int n_node, int n_port) = connect_mpi_->extern_connection_.end(); connect_mpi_->extern_connection_.insert(it1, n_node, conn_node); #endif - + node_vect_[i_group]->Init(i_node_0, n_node, n_port, i_group, &kernel_seed_); node_vect_[i_group]->get_spike_array_ = InitGetSpikeArray(n_node, n_port); - + return i_node_0; } @@ -268,16 +268,16 @@ NodeSeq NESTGPU::CreatePoissonGenerator(int n_node, float rate) else if (n_node <= 0) { throw ngpu_exception("Number of nodes must be greater than zero."); } - - n_poiss_node_ = n_node; - + + n_poiss_node_ = n_node; + BaseNeuron *bn = new BaseNeuron; node_vect_.push_back(bn); int i_node_0 = CreateNodeGroup( n_node, 0); - + float lambda = rate*time_resolution_ / 1000.0; // rate is in Hz, time in ms poiss_generator_->Create(random_generator_, i_node_0, n_node, lambda); - + return NodeSeq(i_node_0, n_node); } @@ -287,7 +287,7 @@ int NESTGPU::CheckUncalibrated(std::string message) if (calibrate_flag_ == true) { throw ngpu_exception(message); } - + return 0; } @@ -305,11 +305,11 @@ int NESTGPU::Calibrate() if (verbosity_level_>=1) { std::cout << MpiRankStr() << "Calibrating ...\n"; } - + neural_time_ = t_min_; - + NodeGroupArrayInit(); - + max_spike_num_ = (int)round(max_spike_num_fact_ * net_connection_->connection_.size() * net_connection_->MaxDelayNum()); @@ -319,7 +319,7 @@ int NESTGPU::Calibrate() * net_connection_->connection_.size() * net_connection_->MaxDelayNum()); max_spike_per_host_ = (max_spike_per_host_>1) ? 
max_spike_per_host_ : 1; - + SpikeInit(max_spike_num_); SpikeBufferInit(net_connection_, max_spike_buffer_size_); @@ -331,19 +331,19 @@ int NESTGPU::Calibrate() max_spike_per_host_); } #endif - + if (net_connection_->NRevConnections()>0) { - RevSpikeInit(net_connection_); + RevSpikeInit(net_connection_); } - + multimeter_->OpenFiles(); - + for (unsigned int i=0; i<node_vect_.size(); i++) { node_vect_[i]->Calibrate(t_min_, time_resolution_); } - + SynGroupCalibrate(); - + gpuErrchk(cudaMemcpyToSymbolAsync(NESTGPUTimeResolution, &time_resolution_, sizeof(float))); /////////////////////////////////// @@ -359,7 +359,7 @@ int NESTGPU::Simulate(float sim_time) { int NESTGPU::Simulate() { StartSimulation(); - + for (long long it=0; it<Nt_; it++) { if (it%100==0 && verbosity_level_>=2 && print_time_==true) { printf("\r[%.2lf %%] Model time: %.3lf ms", 100.0*(neural_time_-neur_t0_)/sim_time_, neural_time_); @@ -376,7 +376,7 @@ int NESTGPU::StartSimulation() if (!calibrate_flag_) { Calibrate(); } -#ifdef HAVE_MPI +#ifdef HAVE_MPI if (mpi_flag_) { MPI_Barrier(MPI_COMM_WORLD); } @@ -391,11 +391,11 @@ int NESTGPU::StartSimulation() std::cout << MpiRankStr() << "Simulating ...\n"; printf("Neural activity simulation time: %.3lf ms\n", sim_time_); } - + neur_t0_ = neural_time_; it_ = 0; Nt_ = (long long)round(sim_time_/time_resolution_); - + return 0; } @@ -404,7 +404,7 @@ int NESTGPU::EndSimulation() if (verbosity_level_>=2 && print_time_==true) { printf("\r[%.2lf %%] Model time: %.3lf ms", 100.0*(neural_time_-neur_t0_)/sim_time_, neural_time_); } -#ifdef HAVE_MPI +#ifdef HAVE_MPI if (mpi_flag_) { MPI_Barrier(MPI_COMM_WORLD); } @@ -454,14 +454,14 @@ int NESTGPU::EndSimulation() connect_mpi_->JoinSpike_time_ << "\n"; } #endif - + if (verbosity_level_>=1) { std::cout << MpiRankStr() << "Building time: " << (build_real_time_ - start_real_time_) << "\n"; std::cout << MpiRankStr() << "Simulation time: " << (end_real_time_ - build_real_time_) << "\n"; } - + return 0; } @@ -497,12 +497,12 @@ int NESTGPU::SimulationStep() ResetConnectionSpikeTimeDown(net_connection_); } } - + for (unsigned int i=0; i<node_vect_.size(); i++) { node_vect_[i]->Update(it_, neural_time_); } gpuErrchk( cudaPeekAtLastError() ); - + neuron_Update_time_ += (getRealTime() - time_mark); multimeter_->WriteRecords(neural_time_); @@ -529,7 +529,7 @@ int NESTGPU::SimulationStep() time_mark = getRealTime(); connect_mpi_->RecvSpikeFromRemote(connect_mpi_->mpi_np_, max_spike_per_host_); - + RecvSpikeFromRemote_time_ += (getRealTime() - time_mark); connect_mpi_->CopySpikeFromRemote(connect_mpi_->mpi_np_, max_spike_per_host_, @@ -537,7 +537,7 @@ int NESTGPU::SimulationStep() MPI_Barrier(MPI_COMM_WORLD); } #endif - + int n_spikes; time_mark = getRealTime(); // Call will get delayed until ClearGetSpikesArrays() @@ -545,8 +545,8 @@ int NESTGPU::SimulationStep() gpuErrchk(cudaMemcpyAsync(&n_spikes, d_SpikeNum, sizeof(int), cudaMemcpyDeviceToHost)); - ClearGetSpikeArrays(); - gpuErrchk( cudaDeviceSynchronize() ); + ClearGetSpikeArrays(); + gpuErrchk( cudaDeviceSynchronize() ); if (n_spikes > 0) { time_mark = getRealTime(); CollectSpikeKernel<<<n_spikes, 1024>>>(n_spikes, d_SpikeTargetNum); @@ -569,7 +569,7 @@ int NESTGPU::SimulationStep() int grid_dim_y = node_vect_[i]->n_port_; dim3 grid_dim(grid_dim_x, grid_dim_y); //dim3 block_dim(1024,1); - + GetSpikes<<<grid_dim, 1024>>> //block_dim>>> (node_vect_[i]->get_spike_array_, node_vect_[i]->n_node_, node_vect_[i]->n_port_, @@ -613,7 +613,7 @@ int NESTGPU::SimulationStep() if (n_rev_spikes > 0) { 
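      // one CUDA block per reverse (postsynaptic) spike, 1024 threads per block,
      // so the synapse updates driven by these spikes run in parallel on the GPU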
SynapseUpdateKernel<<<n_rev_spikes, 1024>>>(n_rev_spikes, d_RevSpikeNConn); gpuErrchk(cudaPeekAtLastError()); - } + } //RevSpikeBufferUpdate_time_ += (getRealTime() - time_mark); } @@ -630,7 +630,7 @@ int NESTGPU::SimulationStep() } it_++; - + return 0; } @@ -673,14 +673,14 @@ int NESTGPU::GetNodeSequenceOffset(int i_node, int n_node, int &i_group) if (i_node<0 || (i_node+n_node > (int)node_group_map_.size())) { throw ngpu_exception("Unrecognized node in getting node sequence offset"); } - i_group = node_group_map_[i_node]; + i_group = node_group_map_[i_node]; if (node_group_map_[i_node+n_node-1] != i_group) { throw ngpu_exception("Nodes belong to different node groups " "in setting parameter"); } return node_vect_[i_group]->i_node_0_; } - + std::vector<int> NESTGPU::GetNodeArrayWithOffset(int *i_node, int n_node, int &i_group) { @@ -711,7 +711,7 @@ int NESTGPU::SetNeuronParam(int i_node, int n_node, { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, n_node, i_group); - + return node_vect_[i_group]->SetScalParam(i_neuron, n_node, param_name, val); } @@ -747,21 +747,21 @@ int NESTGPU::SetNeuronParam( int *i_node, int n_node, int i_group; std::vector<int> nodes = GetNodeArrayWithOffset(i_node, n_node, i_group); - if (node_vect_[i_group]->IsPortParam(param_name)) { + if (node_vect_[i_group]->IsPortParam(param_name)) { return node_vect_[i_group]->SetPortParam(nodes.data(), n_node, param_name, param, array_size); } else { return node_vect_[i_group]->SetArrayParam(nodes.data(), n_node, param_name, param, array_size); - } + } } int NESTGPU::IsNeuronScalParam(int i_node, std::string param_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsScalParam(param_name); } @@ -769,7 +769,7 @@ int NESTGPU::IsNeuronPortParam(int i_node, std::string param_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsPortParam(param_name); } @@ -777,7 +777,7 @@ int NESTGPU::IsNeuronArrayParam(int i_node, std::string param_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsArrayParam(param_name); } @@ -786,7 +786,7 @@ int NESTGPU::SetNeuronIntVar(int i_node, int n_node, { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, n_node, i_group); - + return node_vect_[i_group]->SetIntVar(i_neuron, n_node, var_name, val); } @@ -805,7 +805,7 @@ int NESTGPU::SetNeuronVar(int i_node, int n_node, { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, n_node, i_group); - + return node_vect_[i_group]->SetScalVar(i_neuron, n_node, var_name, val); } @@ -841,21 +841,21 @@ int NESTGPU::SetNeuronVar( int *i_node, int n_node, int i_group; std::vector<int> nodes = GetNodeArrayWithOffset(i_node, n_node, i_group); - if (node_vect_[i_group]->IsPortVar(var_name)) { + if (node_vect_[i_group]->IsPortVar(var_name)) { return node_vect_[i_group]->SetPortVar(nodes.data(), n_node, var_name, var, array_size); } else { return node_vect_[i_group]->SetArrayVar(nodes.data(), n_node, var_name, var, array_size); - } + } } int NESTGPU::IsNeuronIntVar(int i_node, std::string var_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsIntVar(var_name); } @@ -863,7 +863,7 @@ int NESTGPU::IsNeuronScalVar(int i_node, std::string var_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return 
node_vect_[i_group]->IsScalVar(var_name); } @@ -871,7 +871,7 @@ int NESTGPU::IsNeuronPortVar(int i_node, std::string var_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsPortVar(var_name); } @@ -879,7 +879,7 @@ int NESTGPU::IsNeuronArrayVar(int i_node, std::string var_name) { int i_group; int i_neuron = i_node - GetNodeSequenceOffset(i_node, 1, i_group); - + return node_vect_[i_group]->IsArrayVar(var_name); } @@ -943,7 +943,7 @@ float *NESTGPU::GetNeuronParam( int *i_node, int n_node, return node_vect_[i_group]->GetScalParam(nodes.data(), n_node, param_name); } - else if (node_vect_[i_group]->IsPortParam(param_name)) { + else if (node_vect_[i_group]->IsPortParam(param_name)) { return node_vect_[i_group]->GetPortParam(nodes.data(), n_node, param_name); } @@ -1032,7 +1032,7 @@ float *NESTGPU::GetNeuronVar(int *i_node, int n_node, return node_vect_[i_group]->GetScalVar(nodes.data(), n_node, var_name); } - else if (node_vect_[i_group]->IsPortVar(var_name)) { + else if (node_vect_[i_group]->IsPortVar(var_name)) { return node_vect_[i_group]->GetPortVar(nodes.data(), n_node, var_name); } @@ -1065,7 +1065,7 @@ int NESTGPU::ConnectMpiInit(int argc, char *argv[]) if (err==0) { mpi_flag_ = true; } - + return err; #else throw ngpu_exception("MPI is not available in your build"); @@ -1097,7 +1097,7 @@ int NESTGPU::ProcMaster() return connect_mpi_->ProcMaster(); #else throw ngpu_exception("MPI is not available in your build"); -#endif +#endif } int NESTGPU::MpiFinalize() @@ -1110,7 +1110,7 @@ int NESTGPU::MpiFinalize() MPI_Finalize(); } } - + return 0; #else throw ngpu_exception("MPI is not available in your build"); @@ -1151,10 +1151,10 @@ float *NESTGPU::RandomNormalClipped(size_t n, float mean, float stddev, float vmin, float vmax, float vstep) { const float epsi = 1.0e-6; - - n = (n/4 + 1)*4; + + n = (n/4 + 1)*4; int n_extra = n/10; - n_extra = (n_extra/4 + 1)*4; + n_extra = (n_extra/4 + 1)*4; if (n_extra<1024) { n_extra=1024; } @@ -1184,7 +1184,7 @@ float *NESTGPU::RandomNormalClipped(size_t n, float mean, float stddev, } } - return arr; + return arr; } int NESTGPU::BuildDirectConnections() @@ -1211,7 +1211,7 @@ int NESTGPU::BuildDirectConnections() } uint64_t n_dir_conn = dir_conn_vect.size(); node_vect_[iv]->n_dir_conn_ = n_dir_conn; - + DirectConnection *d_dir_conn_array; gpuErrchk(cudaMalloc(&d_dir_conn_array, n_dir_conn*sizeof(DirectConnection ))); @@ -1231,7 +1231,7 @@ std::vector<std::string> NESTGPU::GetIntVarNames(int i_node) throw ngpu_exception("Unrecognized node in reading variable names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetIntVarNames(); } @@ -1241,7 +1241,7 @@ std::vector<std::string> NESTGPU::GetScalVarNames(int i_node) throw ngpu_exception("Unrecognized node in reading variable names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetScalVarNames(); } @@ -1252,7 +1252,7 @@ int NESTGPU::GetNIntVar(int i_node) "variables"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNIntVar(); } @@ -1263,7 +1263,7 @@ int NESTGPU::GetNScalVar(int i_node) "variables"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNScalVar(); } @@ -1273,7 +1273,7 @@ std::vector<std::string> NESTGPU::GetPortVarNames(int i_node) throw ngpu_exception("Unrecognized node in reading variable names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetPortVarNames(); } @@ -1284,7 +1284,7 @@ int 
NESTGPU::GetNPortVar(int i_node) "variables"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNPortVar(); } @@ -1295,7 +1295,7 @@ std::vector<std::string> NESTGPU::GetScalParamNames(int i_node) throw ngpu_exception("Unrecognized node in reading parameter names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetScalParamNames(); } @@ -1306,7 +1306,7 @@ int NESTGPU::GetNScalParam(int i_node) "parameters"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNScalParam(); } @@ -1316,7 +1316,7 @@ std::vector<std::string> NESTGPU::GetPortParamNames(int i_node) throw ngpu_exception("Unrecognized node in reading parameter names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetPortParamNames(); } @@ -1327,7 +1327,7 @@ int NESTGPU::GetNPortParam(int i_node) "parameters"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNPortParam(); } @@ -1338,7 +1338,7 @@ std::vector<std::string> NESTGPU::GetArrayParamNames(int i_node) throw ngpu_exception("Unrecognized node in reading array parameter names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetArrayParamNames(); } @@ -1349,7 +1349,7 @@ int NESTGPU::GetNArrayParam(int i_node) "parameters"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNArrayParam(); } @@ -1360,7 +1360,7 @@ std::vector<std::string> NESTGPU::GetArrayVarNames(int i_node) throw ngpu_exception("Unrecognized node in reading array variable names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetArrayVarNames(); } @@ -1371,7 +1371,7 @@ int NESTGPU::GetNArrayVar(int i_node) "variables"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNArrayVar(); } @@ -1401,7 +1401,7 @@ std::vector<ConnectionStatus> NESTGPU::GetConnectionStatus(std::vector } return conn_stat_vect; } - + std::vector<ConnectionId> NESTGPU::GetConnections(int i_source, int n_source, int i_target, int n_target, int syn_group) { @@ -1415,7 +1415,7 @@ std::vector<ConnectionId> NESTGPU::GetConnections(int i_source, int n_source, } return net_connection_->GetConnections<int>(i_source, n_source, i_target, - n_target, syn_group); + n_target, syn_group); } std::vector<ConnectionId> NESTGPU::GetConnections(int *i_source, int n_source, @@ -1425,10 +1425,10 @@ std::vector<ConnectionId> NESTGPU::GetConnections(int *i_source, int n_source, i_target = 0; n_target = net_connection_->connection_.size(); } - + return net_connection_->GetConnections<int*>(i_source, n_source, i_target, n_target, syn_group); - + } @@ -1439,18 +1439,18 @@ std::vector<ConnectionId> NESTGPU::GetConnections(int i_source, int n_source, i_source = 0; n_source = net_connection_->connection_.size(); } - + return net_connection_->GetConnections<int>(i_source, n_source, i_target, - n_target, syn_group); + n_target, syn_group); } std::vector<ConnectionId> NESTGPU::GetConnections(int *i_source, int n_source, int *i_target, int n_target, int syn_group) { - + return net_connection_->GetConnections<int*>(i_source, n_source, i_target, n_target, syn_group); - + } @@ -1549,10 +1549,10 @@ int NESTGPU::GetRecSpikeTimes(int i_node, int n_node, int **n_spike_times_pt, throw ngpu_exception("Spike times must be extracted for all and only " " the nodes of the same group"); } - + return node_vect_[i_group]->GetRecSpikeTimes(n_spike_times_pt, spike_times_pt); - + } int NESTGPU::PushSpikesToNodes(int n_spikes, int *node_id, @@ -1589,7 +1589,7 
@@ int NESTGPU::PushSpikesToNodes(int n_spikes, int *node_id) gpuErrchk(cudaMalloc(&d_node_id, n_spikes*sizeof(int))); // memcopy data transfer is overlapped with PushSpikeFromRemote kernel gpuErrchk(cudaMemcpyAsync(d_node_id, node_id, n_spikes*sizeof(int), - cudaMemcpyHostToDevice)); + cudaMemcpyHostToDevice)); PushSpikeFromRemote<<<(n_spikes+1023)/1024, 1024>>>(n_spikes, d_node_id); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); @@ -1604,7 +1604,7 @@ int NESTGPU::GetExtNeuronInputSpikes(int *n_spikes, int **node, int **port, ext_neuron_input_spike_node_.clear(); ext_neuron_input_spike_port_.clear(); ext_neuron_input_spike_height_.clear(); - + for (unsigned int i=0; i<node_vect_.size(); i++) { if (node_vect_[i]->IsExtNeuron()) { int n_node; @@ -1620,14 +1620,14 @@ int NESTGPU::GetExtNeuronInputSpikes(int *n_spikes, int **node, int **port, ext_neuron_input_spike_height_.push_back(sh[j]); } } - } + } } } *n_spikes = ext_neuron_input_spike_node_.size(); *node = ext_neuron_input_spike_node_.data(); *port = ext_neuron_input_spike_port_.data(); *spike_height = ext_neuron_input_spike_height_.data(); - + return 0; } @@ -1666,7 +1666,7 @@ std::vector<std::string> NESTGPU::GetGroupParamNames(int i_node) throw ngpu_exception("Unrecognized node in reading group parameter names"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetGroupParamNames(); } @@ -1677,7 +1677,7 @@ int NESTGPU::GetNGroupParam(int i_node) "group parameters"); } int i_group = node_group_map_[i_node]; - + return node_vect_[i_group]->GetNGroupParam(); } @@ -1688,8 +1688,8 @@ int NESTGPU::ConnectRemoteNodes() if (n_remote_node_>0) { i_remote_node_0_ = node_group_map_.size(); BaseNeuron *bn = new BaseNeuron; - node_vect_.push_back(bn); - CreateNodeGroup(n_remote_node_, 0); + node_vect_.push_back(bn); + CreateNodeGroup(n_remote_node_, 0); for (unsigned int i=0; i<remote_connection_vect_.size(); i++) { RemoteConnection rc = remote_connection_vect_[i]; net_connection_->Connect(i_remote_node_0_ + rc.i_source_rel, rc.i_target, @@ -1697,7 +1697,7 @@ int NESTGPU::ConnectRemoteNodes() } } - + return 0; } @@ -1713,7 +1713,7 @@ std::vector<std::string> NESTGPU::GetBoolParamNames() for (int i=0; i<N_KERNEL_BOOL_PARAM; i++) { param_name_vect.push_back(kernel_bool_param_name[i]); } - + return param_name_vect; } @@ -1736,7 +1736,7 @@ int NESTGPU::GetBoolParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized kernel boolean parameter ") + param_name); } - + return i_param; } @@ -1764,7 +1764,7 @@ int NESTGPU::SetBoolParam(std::string param_name, bool val) throw ngpu_exception(std::string("Unrecognized kernel boolean parameter ") + param_name); } - + return 0; } @@ -1780,7 +1780,7 @@ std::vector<std::string> NESTGPU::GetFloatParamNames() for (int i=0; i<N_KERNEL_FLOAT_PARAM; i++) { param_name_vect.push_back(kernel_float_param_name[i]); } - + return param_name_vect; } @@ -1803,7 +1803,7 @@ int NESTGPU::GetFloatParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized kernel float parameter ") + param_name); } - + return i_param; } @@ -1841,7 +1841,7 @@ int NESTGPU::SetFloatParam(std::string param_name, float val) throw ngpu_exception(std::string("Unrecognized kernel float parameter ") + param_name); } - + return 0; } @@ -1856,7 +1856,7 @@ std::vector<std::string> NESTGPU::GetIntParamNames() for (int i=0; i<N_KERNEL_INT_PARAM; i++) { param_name_vect.push_back(kernel_int_param_name[i]); } - + return param_name_vect; } @@ -1879,7 +1879,7 @@ int 
NESTGPU::GetIntParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized kernel int parameter ") + param_name); } - + return i_param; } @@ -1942,7 +1942,7 @@ int NESTGPU::SetIntParam(std::string param_name, int val) throw ngpu_exception(std::string("Unrecognized kernel int parameter ") + param_name); } - + return 0; } diff --git a/src/nestgpu.h b/src/nestgpu.h index 8b5fc3a78..9d7f437b3 100644 --- a/src/nestgpu.h +++ b/src/nestgpu.h @@ -57,9 +57,9 @@ class Sequence public: int i0; int n; - + Sequence(int i0=0, int n=0) : i0(i0), n(n) {} - + inline int operator[](int i) { if (i<0) { throw ngpu_exception("Sequence index cannot be negative"); @@ -96,7 +96,7 @@ class RemoteNodeSeq public: int i_host; NodeSeq node_seq; - + RemoteNodeSeq(int i_host=0, NodeSeq node_seq=NodeSeq(0,0)) : i_host(i_host), node_seq(node_seq) {} }; @@ -114,14 +114,14 @@ class NESTGPU Multimeter *multimeter_; std::vector<BaseNeuron*> node_vect_; // -> node_group_vect std::vector<SynModel*> syn_group_vect_; - + NetConnection *net_connection_; bool mpi_flag_; // true if MPI is initialized #ifdef HAVE_MPI ConnectMpi *connect_mpi_; #endif - + std::vector<signed char> node_group_map_; signed char *d_node_group_map_; @@ -172,7 +172,7 @@ class NESTGPU template <class T1, class T2> int _Connect(T1 source, int n_source, T2 target, int n_target, ConnSpec &conn_spec, SynSpec &syn_spec); - + template<class T1, class T2> int _SingleConnect(T1 source, int i_source, T2 target, int i_target, int i_array, SynSpec &syn_spec); @@ -220,12 +220,12 @@ class NESTGPU int _RemoteConnect(RemoteNode<T1> source, int n_source, RemoteNode<T2> target, int n_target, ConnSpec &conn_spec, SynSpec &syn_spec); - + template <class T1, class T2> int _RemoteConnectOneToOne (RemoteNode<T1> source, RemoteNode<T2> target, int n_node, SynSpec &syn_spec); - + template <class T1, class T2> int _RemoteConnectAllToAll (RemoteNode<T1> source, int n_source, RemoteNode<T2> target, int n_target, @@ -235,7 +235,7 @@ class NESTGPU int _RemoteConnectFixedTotalNumber (RemoteNode<T1> source, int n_source, RemoteNode<T2> target, int n_target, int n_conn, SynSpec &syn_spec); - + template <class T1, class T2> int _RemoteConnectFixedIndegree (RemoteNode<T1> source, int n_source, RemoteNode<T2> target, int n_target, @@ -270,7 +270,7 @@ class NESTGPU int SetRandomSeed(unsigned long long seed); int SetTimeResolution(float time_res); - + inline float GetTimeResolution() { return time_resolution_; } @@ -325,7 +325,7 @@ class NESTGPU int n_node=1, int n_port=1); int CreateRecord(std::string file_name, std::string *var_name_arr, - int *i_node_arr, int n_node); + int *i_node_arr, int n_node); int CreateRecord(std::string file_name, std::string *var_name_arr, int *i_node_arr, int *port_arr, int n_node); std::vector<std::vector<float> > *GetRecordData(int i_record); @@ -350,7 +350,7 @@ class NESTGPU int array_size) { return SetNeuronParam(nodes.i0, nodes.n, param_name, param, array_size); } - + int SetNeuronParam(std::vector<int> nodes, std::string param_name, float val) { return SetNeuronParam(nodes.data(), nodes.size(), param_name, val); @@ -397,7 +397,7 @@ class NESTGPU int array_size) { return SetNeuronVar(nodes.i0, nodes.n, var_name, var, array_size); } - + int SetNeuronVar(std::vector<int> nodes, std::string var_name, float val) { return SetNeuronVar(nodes.data(), nodes.size(), var_name, val); @@ -420,13 +420,13 @@ class NESTGPU float *GetNeuronParam(NodeSeq nodes, std::string param_name) { return GetNeuronParam(nodes.i0, nodes.n, param_name); } - + float 
*GetNeuronParam(std::vector<int> nodes, std::string param_name) { return GetNeuronParam(nodes.data(), nodes.size(), param_name); } float *GetArrayParam(int i_node, std::string param_name); - + int *GetNeuronIntVar(int i_node, int n_neuron, std::string var_name); int *GetNeuronIntVar(int *i_node, int n_neuron, std::string var_name); @@ -434,11 +434,11 @@ class NESTGPU int *GetNeuronIntVar(NodeSeq nodes, std::string var_name) { return GetNeuronIntVar(nodes.i0, nodes.n, var_name); } - + int *GetNeuronIntVar(std::vector<int> nodes, std::string var_name) { return GetNeuronIntVar(nodes.data(), nodes.size(), var_name); } - + float *GetNeuronVar(int i_node, int n_neuron, std::string var_name); float *GetNeuronVar(int *i_node, int n_neuron, std::string var_name); @@ -446,13 +446,13 @@ class NESTGPU float *GetNeuronVar(NodeSeq nodes, std::string var_name) { return GetNeuronVar(nodes.i0, nodes.n, var_name); } - + float *GetNeuronVar(std::vector<int> nodes, std::string var_name) { return GetNeuronVar(nodes.data(), nodes.size(), var_name); } float *GetArrayVar(int i_node, std::string param_name); - + int GetNodeSequenceOffset(int i_node, int n_node, int &i_group); std::vector<int> GetNodeArrayWithOffset(int *i_node, int n_node, @@ -465,18 +465,18 @@ class NESTGPU int IsNeuronArrayParam(int i_node, std::string param_name); int IsNeuronIntVar(int i_node, std::string var_name); - + int IsNeuronScalVar(int i_node, std::string var_name); int IsNeuronPortVar(int i_node, std::string var_name); int IsNeuronArrayVar(int i_node, std::string var_name); - + int SetSpikeGenerator(int i_node, int n_spikes, float *spike_time, float *spike_height); int Calibrate(); - + int Simulate(); int Simulate(float sim_time); @@ -486,7 +486,7 @@ class NESTGPU int SimulationStep(); int EndSimulation(); - + int ConnectMpiInit(int argc, char *argv[]); int MpiId(); @@ -498,9 +498,9 @@ class NESTGPU int MpiFinalize(); std::string MpiRankStr(); - + void SetErrorFlag(bool error_flag) {error_flag_ = error_flag;} - + void SetErrorMessage(std::string error_message) { error_message_ = error_message; } @@ -517,13 +517,13 @@ class NESTGPU int OnException() {return on_exception_;} unsigned int *RandomInt(size_t n); - + float *RandomUniform(size_t n); float *RandomNormal(size_t n, float mean, float stddev); float *RandomNormalClipped(size_t n, float mean, float stddev, float vmin, - float vmax, float vstep); + float vmax, float vstep); int Connect ( @@ -592,23 +592,23 @@ class NESTGPU std::vector<std::string> GetScalVarNames(int i_node); int GetNIntVar(int i_node); - + std::vector<std::string> GetIntVarNames(int i_node); int GetNScalVar(int i_node); - + std::vector<std::string> GetPortVarNames(int i_node); int GetNPortVar(int i_node); - + std::vector<std::string> GetScalParamNames(int i_node); int GetNScalParam(int i_node); - + std::vector<std::string> GetPortParamNames(int i_node); int GetNPortParam(int i_node); - + std::vector<std::string> GetArrayParamNames(int i_node); int GetNArrayParam(int i_node); @@ -618,11 +618,11 @@ class NESTGPU std::vector<std::string> GetGroupParamNames(int i_node); int GetNGroupParam(int i_node); - + int GetNArrayVar(int i_node); ConnectionStatus GetConnectionStatus(ConnectionId conn_id); - + std::vector<ConnectionStatus> GetConnectionStatus(std::vector<ConnectionId> &conn_id_vect); @@ -641,7 +641,7 @@ class NESTGPU std::vector<ConnectionId> GetConnections(int *i_source, int n_source, int *i_target, int n_target, int syn_group=-1); - + std::vector<ConnectionId> GetConnections(NodeSeq source, NodeSeq target, int 
syn_group=-1); @@ -673,13 +673,13 @@ class NESTGPU int SynGroupCalibrate(); int ActivateSpikeCount(int i_node, int n_node); - + int ActivateSpikeCount(NodeSeq nodes) { return ActivateSpikeCount(nodes.i0, nodes.n); } int ActivateRecSpikeTimes(int i_node, int n_node, int max_n_rec_spike_times); - + int ActivateRecSpikeTimes(NodeSeq nodes, int max_n_rec_spike_times) { return ActivateRecSpikeTimes(nodes.i0, nodes.n, max_n_rec_spike_times); } @@ -702,7 +702,7 @@ class NESTGPU } int PushSpikesToNodes(int n_spikes, int *node_id, float *spike_height); - + int PushSpikesToNodes(int n_spikes, int *node_id); int GetExtNeuronInputSpikes(int *n_spikes, int **node, int **port, @@ -710,7 +710,7 @@ class NESTGPU int SetNeuronGroupParam(int i_node, int n_node, std::string param_name, float val); - + int IsNeuronGroupParam(int i_node, std::string param_name); float GetNeuronGroupParam(int i_node, std::string param_name); diff --git a/src/nestgpu_C.cpp b/src/nestgpu_C.cpp index 23d8d67f0..d41556a7d 100644 --- a/src/nestgpu_C.cpp +++ b/src/nestgpu_C.cpp @@ -44,7 +44,7 @@ extern "C" { NESTGPU_instance = new NESTGPU(); } } - + char *NESTGPU_GetErrorMessage() { checkNESTGPUInstance(); @@ -65,7 +65,7 @@ extern "C" { } unsigned int *RandomInt(size_t n); - + int NESTGPU_SetRandomSeed(unsigned long long seed) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->SetRandomSeed(seed); @@ -101,7 +101,7 @@ extern "C" { ret = NESTGPU_instance->SetVerbosityLevel(verbosity_level); } END_ERR_PROP return ret; } - + int NESTGPU_Create(char *model_name, int n_neuron, int n_port) { int ret = 0; BEGIN_ERR_PROP { std::string model_name_str = std::string(model_name); @@ -116,7 +116,7 @@ extern "C" { ret = pg[0]; } END_ERR_PROP return ret; } - + int NESTGPU_CreateRecord(char *file_name, char *var_name_arr[], int *i_node_arr, int *port_arr, int n_node) @@ -129,9 +129,9 @@ extern "C" { } ret = NESTGPU_instance->CreateRecord (file_name_str, var_name_vect.data(), i_node_arr, port_arr, - n_node); + n_node); } END_ERR_PROP return ret; } - + int NESTGPU_GetRecordDataRows(int i_record) { int ret = 0; BEGIN_ERR_PROP { std::vector<std::vector<float> > *data_vect_pt @@ -139,12 +139,12 @@ extern "C" { ret = data_vect_pt->size(); } END_ERR_PROP return ret; } - + int NESTGPU_GetRecordDataColumns(int i_record) { int ret = 0; BEGIN_ERR_PROP { std::vector<std::vector<float> > *data_vect_pt = NESTGPU_instance->GetRecordData(i_record); - + ret = data_vect_pt->at(0).size(); } END_ERR_PROP return ret; } @@ -162,7 +162,7 @@ extern "C" { int NESTGPU_SetNeuronScalParam(int i_node, int n_neuron, char *param_name, float val) { int ret = 0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetNeuronParam(i_node, n_neuron, param_name_str, val); @@ -172,7 +172,7 @@ extern "C" { char *param_name, float *param, int array_size) { int ret = 0; BEGIN_ERR_PROP { - std::string param_name_str = std::string(param_name); + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetNeuronParam(i_node, n_neuron, param_name_str, param, array_size); @@ -190,38 +190,38 @@ extern "C" { char *param_name, float *param, int array_size) { int ret = 0; BEGIN_ERR_PROP { - std::string param_name_str = std::string(param_name); + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetNeuronParam(i_node, n_neuron, param_name_str, param, array_size); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronScalParam(int i_node, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string 
param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsNeuronScalParam(i_node, param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronPortParam(int i_node, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsNeuronPortParam(i_node, param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronArrayParam(int i_node, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsNeuronArrayParam(i_node, param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_SetNeuronIntVar(int i_node, int n_neuron, char *var_name, int val) { int ret = 0; BEGIN_ERR_PROP { - + std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->SetNeuronIntVar(i_node, n_neuron, var_name_str, val); @@ -230,7 +230,7 @@ extern "C" { int NESTGPU_SetNeuronScalVar(int i_node, int n_neuron, char *var_name, float val) { int ret = 0; BEGIN_ERR_PROP { - + std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->SetNeuronVar(i_node, n_neuron, var_name_str, val); @@ -240,7 +240,7 @@ extern "C" { char *var_name, float *var, int array_size) { int ret = 0; BEGIN_ERR_PROP { - std::string var_name_str = std::string(var_name); + std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->SetNeuronVar(i_node, n_neuron, var_name_str, var, array_size); @@ -266,61 +266,61 @@ extern "C" { char *var_name, float *var, int array_size) { int ret = 0; BEGIN_ERR_PROP { - std::string var_name_str = std::string(var_name); + std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->SetNeuronVar(i_node, n_neuron, var_name_str, var, array_size); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronIntVar(int i_node, char *var_name) { int ret = 0; BEGIN_ERR_PROP { std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->IsNeuronIntVar(i_node, var_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronScalVar(int i_node, char *var_name) { int ret = 0; BEGIN_ERR_PROP { std::string var_name_str = std::string(var_name); - + ret = NESTGPU_instance->IsNeuronScalVar(i_node, var_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronPortVar(int i_node, char *var_name) { int ret = 0; BEGIN_ERR_PROP { std::string var_name_str = std::string(var_name); - + ret = NESTGPU_instance->IsNeuronPortVar(i_node, var_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_IsNeuronArrayVar(int i_node, char *var_name) { int ret = 0; BEGIN_ERR_PROP { std::string var_name_str = std::string(var_name); - + ret = NESTGPU_instance->IsNeuronArrayVar(i_node, var_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetNeuronParamSize(int i_node, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetNeuronParamSize(i_node, param_name_str); } END_ERR_PROP return ret; } - - + + int NESTGPU_GetNeuronVarSize(int i_node, char *var_name) { int ret = 0; BEGIN_ERR_PROP { std::string var_name_str = std::string(var_name); - + ret = NESTGPU_instance->GetNeuronVarSize(i_node, var_name_str); } END_ERR_PROP return ret; } - - + + float *NESTGPU_GetNeuronParam(int i_node, int n_neuron, char *param_name) { float *ret = NULL; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetNeuronParam(i_node, n_neuron, param_name_str); @@ -338,16 +338,16 @@ extern "C" { float 
*NESTGPU_GetArrayParam(int i_node, char *param_name) { float *ret = NULL; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetArrayParam(i_node, param_name_str); } END_ERR_PROP return ret; } - + int *NESTGPU_GetNeuronIntVar(int i_node, int n_neuron, char *param_name) { int *ret = NULL; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetNeuronIntVar(i_node, n_neuron, param_name_str); @@ -365,7 +365,7 @@ extern "C" { float *NESTGPU_GetNeuronVar(int i_node, int n_neuron, char *param_name) { float *ret = NULL; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetNeuronVar(i_node, n_neuron, param_name_str); @@ -382,7 +382,7 @@ extern "C" { float *NESTGPU_GetArrayVar(int i_node, char *var_name) { float *ret = NULL; BEGIN_ERR_PROP { - + std::string var_name_str = std::string(var_name); ret = NESTGPU_instance->GetArrayVar(i_node, var_name_str); } END_ERR_PROP return ret; } @@ -441,24 +441,24 @@ extern "C" { { unsigned int *ret = NULL; BEGIN_ERR_PROP { ret = NESTGPU_instance->RandomInt(n); } END_ERR_PROP return ret; } - + float *NESTGPU_RandomUniform(size_t n) { float* ret = NULL; BEGIN_ERR_PROP { ret = NESTGPU_instance->RandomUniform(n); } END_ERR_PROP return ret; } - + float *NESTGPU_RandomNormal(size_t n, float mean, float stddev) { float *ret = NULL; BEGIN_ERR_PROP { ret = NESTGPU_instance->RandomNormal(n, mean, stddev); } END_ERR_PROP return ret; } - + float *NESTGPU_RandomNormalClipped(size_t n, float mean, float stddev, float vmin, float vmax, float vstep) { float *ret = NULL; BEGIN_ERR_PROP { ret = NESTGPU_instance->RandomNormalClipped(n, mean, stddev, vmin, vmax, vstep); } END_ERR_PROP return ret; } - + int NESTGPU_Connect(int i_source_node, int i_target_node, unsigned char port, unsigned char syn_group, float weight, float delay) @@ -529,14 +529,14 @@ extern "C" { int n_target) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->Connect(i_source, n_source, i_target, n_target, - ConnSpec_instance, SynSpec_instance); + ConnSpec_instance, SynSpec_instance); } END_ERR_PROP return ret; } int NESTGPU_ConnectSeqGroup(int i_source, int n_source, int *i_target, int n_target) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->Connect(i_source, n_source, i_target, n_target, - ConnSpec_instance, SynSpec_instance); + ConnSpec_instance, SynSpec_instance); } END_ERR_PROP return ret; } int NESTGPU_ConnectGroupSeq(int *i_source, int n_source, int i_target, @@ -560,7 +560,7 @@ extern "C" { ret = NESTGPU_instance->RemoteConnect(i_source_host, i_source, n_source, i_target_host, i_target, n_target, ConnSpec_instance, - SynSpec_instance); + SynSpec_instance); } END_ERR_PROP return ret; } int NESTGPU_RemoteConnectSeqGroup(int i_source_host, int i_source, @@ -570,7 +570,7 @@ extern "C" { ret = NESTGPU_instance->RemoteConnect(i_source_host, i_source, n_source, i_target_host, i_target, n_target, ConnSpec_instance, - SynSpec_instance); + SynSpec_instance); } END_ERR_PROP return ret; } int NESTGPU_RemoteConnectGroupSeq(int i_source_host, int *i_source, @@ -604,12 +604,12 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } char **NESTGPU_GetScalVarNames(int i_node) @@ -621,12 +621,12 @@ extern "C" { for (unsigned int i=0; 
i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } int NESTGPU_GetNIntVar(int i_node) @@ -649,21 +649,21 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_GetNPortVar(int i_node) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetNPortVar(i_node); } END_ERR_PROP return ret; } - + char **NESTGPU_GetScalParamNames(int i_node) { char **ret = NULL; BEGIN_ERR_PROP { std::vector<std::string> var_name_vect = @@ -673,15 +673,15 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_GetNScalParam(int i_node) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetNScalParam(i_node); @@ -697,15 +697,15 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_GetNGroupParam(int i_node) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetNGroupParam(i_node); @@ -721,15 +721,15 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_GetNPortParam(int i_node) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetNPortParam(i_node); @@ -745,15 +745,15 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_GetNArrayParam(int i_node) { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetNArrayParam(i_node); @@ -768,12 +768,12 @@ extern "C" { for (unsigned int i=0; i<var_name_vect.size(); i++) { char *var_name = (char*)malloc((var_name_vect[i].length() + 1) *sizeof(char)); - + strcpy(var_name, var_name_vect[i].c_str()); var_name_array[i] = var_name; } ret = var_name_array; - + } END_ERR_PROP return ret; } int NESTGPU_GetNArrayVar(int i_node) @@ -865,7 +865,7 @@ extern "C" { *syn_group = conn_stat.syn_group; *delay = conn_stat.delay; *weight = conn_stat.weight; - + ret = 0; } END_ERR_PROP return ret; } @@ -883,7 +883,7 @@ extern "C" { ret = NESTGPU_instance->GetSynGroupNParam(i_syn_group); } END_ERR_PROP return ret; } - + char **NESTGPU_GetSynGroupParamNames(int i_syn_group) { char **ret = NULL; BEGIN_ERR_PROP { std::vector<std::string> name_vect = @@ -893,59 +893,59 @@ extern "C" { for (unsigned int i=0; i<name_vect.size(); i++) { char *param_name = (char*)malloc((name_vect[i].length() + 1) *sizeof(char)); - + strcpy(param_name, name_vect[i].c_str()); name_array[i] = param_name; } ret = 
name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_IsSynGroupParam(int i_syn_group, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsSynGroupParam(i_syn_group, param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetSynGroupParamIdx(int i_syn_group, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetSynGroupParamIdx(i_syn_group, param_name_str); } END_ERR_PROP return ret; } - + float NESTGPU_GetSynGroupParam(int i_syn_group, char *param_name) { float ret = 0.0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetSynGroupParam(i_syn_group, param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_SetSynGroupParam(int i_syn_group, char *param_name, float val) { float ret = 0.0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetSynGroupParam(i_syn_group, param_name_str, val); } END_ERR_PROP return ret; } - + int NESTGPU_ActivateSpikeCount(int i_node, int n_node) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->ActivateSpikeCount(i_node, n_node); } END_ERR_PROP return ret; } - + int NESTGPU_ActivateRecSpikeTimes(int i_node, int n_node, int max_n_rec_spike_times) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->ActivateRecSpikeTimes(i_node, n_node, max_n_rec_spike_times); } END_ERR_PROP return ret; } @@ -953,15 +953,15 @@ extern "C" { int NESTGPU_SetRecSpikeTimesStep(int i_node, int n_node, int rec_spike_times_step) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->SetRecSpikeTimesStep(i_node, n_node, rec_spike_times_step); } END_ERR_PROP return ret; } - + int NESTGPU_GetNRecSpikeTimes(int i_node) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->GetNRecSpikeTimes(i_node); } END_ERR_PROP return ret; } @@ -971,19 +971,19 @@ extern "C" { { int ret = 0; BEGIN_ERR_PROP { ret = NESTGPU_instance->GetRecSpikeTimes(i_node, n_node, n_spike_times_pt, spike_times_pt); - + } END_ERR_PROP return ret; } - + int NESTGPU_PushSpikesToNodes(int n_spikes, int *node_id) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->PushSpikesToNodes(n_spikes, node_id); } END_ERR_PROP return ret; } int NESTGPU_GetExtNeuronInputSpikes(int *n_spikes, int **node, int **port, float **spike_height, int include_zeros) { int ret = 0; BEGIN_ERR_PROP { - + ret = NESTGPU_instance->GetExtNeuronInputSpikes(n_spikes, node, port, spike_height, include_zeros>0); @@ -992,7 +992,7 @@ extern "C" { int NESTGPU_SetNeuronGroupParam(int i_node, int n_node, char *param_name, float val) { float ret = 0.0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetNeuronGroupParam(i_node, n_node, param_name_str, @@ -1002,14 +1002,14 @@ extern "C" { int NESTGPU_IsNeuronGroupParam(int i_node, char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsNeuronGroupParam(i_node, param_name_str); } END_ERR_PROP return ret; } float NESTGPU_GetNeuronGroupParam(int i_node, char *param_name) { float ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetNeuronGroupParam(i_node, param_name_str); } END_ERR_PROP return ret; } @@ -1019,7 +1019,7 @@ extern "C" { ret = NESTGPU_instance->GetNBoolParam(); } END_ERR_PROP return ret; } - + char 
**NESTGPU_GetBoolParamNames() { char **ret = NULL; BEGIN_ERR_PROP { std::vector<std::string> name_vect = @@ -1029,43 +1029,43 @@ extern "C" { for (unsigned int i=0; i<name_vect.size(); i++) { char *param_name = (char*)malloc((name_vect[i].length() + 1) *sizeof(char)); - + strcpy(param_name, name_vect[i].c_str()); name_array[i] = param_name; } ret = name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_IsBoolParam(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsBoolParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetBoolParamIdx(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetBoolParamIdx(param_name_str); } END_ERR_PROP return ret; } - + bool NESTGPU_GetBoolParam(char *param_name) { bool ret = true; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetBoolParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_SetBoolParam(char *param_name, bool val) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->SetBoolParam(param_name_str, val); } END_ERR_PROP return ret; } @@ -1075,7 +1075,7 @@ extern "C" { ret = NESTGPU_instance->GetNFloatParam(); } END_ERR_PROP return ret; } - + char **NESTGPU_GetFloatParamNames() { char **ret = NULL; BEGIN_ERR_PROP { std::vector<std::string> name_vect = @@ -1085,43 +1085,43 @@ extern "C" { for (unsigned int i=0; i<name_vect.size(); i++) { char *param_name = (char*)malloc((name_vect[i].length() + 1) *sizeof(char)); - + strcpy(param_name, name_vect[i].c_str()); name_array[i] = param_name; } ret = name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_IsFloatParam(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsFloatParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetFloatParamIdx(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetFloatParamIdx(param_name_str); } END_ERR_PROP return ret; } - + float NESTGPU_GetFloatParam(char *param_name) { float ret = 0.0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetFloatParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_SetFloatParam(char *param_name, float val) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->SetFloatParam(param_name_str, val); } END_ERR_PROP return ret; } @@ -1130,7 +1130,7 @@ extern "C" { ret = NESTGPU_instance->GetNIntParam(); } END_ERR_PROP return ret; } - + char **NESTGPU_GetIntParamNames() { char **ret = NULL; BEGIN_ERR_PROP { std::vector<std::string> name_vect = @@ -1140,42 +1140,42 @@ extern "C" { for (unsigned int i=0; i<name_vect.size(); i++) { char *param_name = (char*)malloc((name_vect[i].length() + 1) *sizeof(char)); - + strcpy(param_name, name_vect[i].c_str()); name_array[i] = param_name; } ret = name_array; - + } END_ERR_PROP return ret; } - + int NESTGPU_IsIntParam(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string param_name_str = std::string(param_name); - + ret = NESTGPU_instance->IsIntParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetIntParamIdx(char *param_name) { int ret = 0; BEGIN_ERR_PROP { std::string 
param_name_str = std::string(param_name); - + ret = NESTGPU_instance->GetIntParamIdx(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_GetIntParam(char *param_name) { int ret = 0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->GetIntParam(param_name_str); } END_ERR_PROP return ret; } - + int NESTGPU_SetIntParam(char *param_name, int val) { int ret = 0; BEGIN_ERR_PROP { - + std::string param_name_str = std::string(param_name); ret = NESTGPU_instance->SetIntParam(param_name_str, val); } END_ERR_PROP return ret; } diff --git a/src/nestgpu_C.h b/src/nestgpu_C.h index fe642dddc..653f59143 100644 --- a/src/nestgpu_C.h +++ b/src/nestgpu_C.h @@ -30,11 +30,11 @@ #ifdef __cplusplus extern "C" { #endif - + char *NESTGPU_GetErrorMessage(); unsigned char NESTGPU_GetErrorCode(); - + void NESTGPU_SetOnException(int on_exception); int NESTGPU_SetRandomSeed(unsigned long long seed); @@ -54,13 +54,13 @@ extern "C" { int NESTGPU_Create(char *model_name, int n_neuron, int n_port); int NESTGPU_CreatePoissonGenerator(int n_node, float rate); - + int NESTGPU_CreateRecord(char *file_name, char *var_name_arr[], int *i_node_arr, int *port_arr, int n_node); - + int NESTGPU_GetRecordDataRows(int i_record); - + int NESTGPU_GetRecordDataColumns(int i_record); float **NESTGPU_GetRecordData(int i_record); @@ -78,13 +78,13 @@ extern "C" { int NESTGPU_SetNeuronPtArrayParam(int *i_node, int n_neuron, char *param_name, float *param, int array_size); - + int NESTGPU_IsNeuronScalParam(int i_node, char *param_name); - + int NESTGPU_IsNeuronPortParam(int i_node, char *param_name); int NESTGPU_IsNeuronArrayParam(int i_node, char *param_name); - + int NESTGPU_SetNeuronIntVar(int i_node, int n_neuron, char *var_name, int val); @@ -105,11 +105,11 @@ extern "C" { int NESTGPU_SetNeuronPtArrayVar(int *i_node, int n_neuron, char *var_name, float *var, int array_size); - + int NESTGPU_IsNeuronIntVar(int i_node, char *var_name); int NESTGPU_IsNeuronScalVar(int i_node, char *var_name); - + int NESTGPU_IsNeuronPortVar(int i_node, char *var_name); int NESTGPU_IsNeuronArrayVar(int i_node, char *var_name); @@ -117,7 +117,7 @@ extern "C" { int NESTGPU_GetNeuronParamSize(int i_node, char *param_name); int NESTGPU_GetNeuronVarSize(int i_node, char *var_name); - + float *NESTGPU_GetNeuronParam(int i_node, int n_neuron, char *param_name); @@ -131,15 +131,15 @@ extern "C" { int *NESTGPU_GetNeuronPtIntVar(int *i_node, int n_neuron, char *param_name); - + float *NESTGPU_GetNeuronVar(int i_node, int n_neuron, char *param_name); float *NESTGPU_GetNeuronPtVar(int *i_node, int n_neuron, char *param_name); - + float *NESTGPU_GetArrayVar(int i_node, char *var_name); - + int NESTGPU_Calibrate(); int NESTGPU_Simulate(); @@ -161,14 +161,14 @@ extern "C" { int NESTGPU_MpiFinalize(); unsigned int *NESTGPU_RandomInt(size_t n); - + float *NESTGPU_RandomUniform(size_t n); - + float *NESTGPU_RandomNormal(size_t n, float mean, float stddev); - + float *NESTGPU_RandomNormalClipped(size_t n, float mean, float stddev, float vmin, float vmax, float vstep); - + int NESTGPU_Connect(int i_source_node, int i_target_node, unsigned char port, unsigned char syn_group, float weight, float delay); @@ -224,35 +224,35 @@ extern "C" { char **NESTGPU_GetIntVarNames(int i_node); char **NESTGPU_GetScalVarNames(int i_node); - + int NESTGPU_GetNIntVar(int i_node); int NESTGPU_GetNScalVar(int i_node); - + char **NESTGPU_GetPortVarNames(int i_node); - + int NESTGPU_GetNPortVar(int i_node); - + char 
**NESTGPU_GetScalParamNames(int i_node); - + int NESTGPU_GetNScalParam(int i_node); - + char **NESTGPU_GetPortParamNames(int i_node); int NESTGPU_GetNGroupParam(int i_node); - + char **NESTGPU_GetGroupParamNames(int i_node); int NESTGPU_GetNPortParam(int i_node); char **NESTGPU_GetArrayParamNames(int i_node); - + int NESTGPU_GetNArrayParam(int i_node); char **NESTGPU_GetArrayVarNames(int i_node); - + int NESTGPU_GetNArrayVar(int i_node); - + int *NESTGPU_GetSeqSeqConnections(int i_source, int n_source, int i_target, int n_target, int syn_group, int *n_conn); @@ -273,27 +273,27 @@ extern "C" { float *weight); int NESTGPU_CreateSynGroup(char *model_name); - + int NESTGPU_GetSynGroupNParam(int i_syn_group); - + char **NESTGPU_GetSynGroupParamNames(int i_syn_group); - + int NESTGPU_IsSynGroupParam(int i_syn_group, char *param_name); - + int NESTGPU_GetSynGroupParamIdx(int i_syn_group, char *param_name); - + float NESTGPU_GetSynGroupParam(int i_syn_group, char *param_name); - + int NESTGPU_SetSynGroupParam(int i_syn_group, char *param_name, float val); int NESTGPU_ActivateSpikeCount(int i_node, int n_node); int NESTGPU_ActivateRecSpikeTimes(int i_node, int n_node, int max_n_rec_spike_times); - + int NESTGPU_SetRecSpikeTimesStep(int i_node, int n_node, int rec_spike_times_step); - + int NESTGPU_GetNRecSpikeTimes(int i_node); int NESTGPU_GetRecSpikeTimes(int i_node, int n_node, @@ -301,7 +301,7 @@ extern "C" { float ***spike_times_pt); int NESTGPU_PushSpikesToNodes(int n_spikes, int *node_id); - + int NESTGPU_GetExtNeuronInputSpikes(int *n_spikes, int **node, int **port, float **spike_height, int include_zeros); @@ -314,39 +314,39 @@ extern "C" { float NESTGPU_GetNeuronGroupParam(int i_node, char *param_name); int NESTGPU_GetNBoolParam(); - + char **NESTGPU_GetBoolParamNames(); - + int NESTGPU_IsBoolParam(char *param_name); - + int NESTGPU_GetBoolParamIdx(char *param_name); - + bool NESTGPU_GetBoolParam(char *param_name); - + int NESTGPU_SetBoolParam(char *param_name, bool val); int NESTGPU_GetNFloatParam(); - + char **NESTGPU_GetFloatParamNames(); - + int NESTGPU_IsFloatParam(char *param_name); - + int NESTGPU_GetFloatParamIdx(char *param_name); - + float NESTGPU_GetFloatParam(char *param_name); - + int NESTGPU_SetFloatParam(char *param_name, float val); int NESTGPU_GetNIntParam(); - + char **NESTGPU_GetIntParamNames(); - + int NESTGPU_IsIntParam(char *param_name); - + int NESTGPU_GetIntParamIdx(char *param_name); - + int NESTGPU_GetIntParam(char *param_name); - + int NESTGPU_SetIntParam(char *param_name, int val); int NESTGPU_RemoteCreate(int i_host, char *model_name, int n_neuron, diff --git a/src/neuron_models.h b/src/neuron_models.h index 16206b7f9..fc169a006 100644 --- a/src/neuron_models.h +++ b/src/neuron_models.h @@ -29,7 +29,7 @@ enum NeuronModels { i_null_model = 0, - i_iaf_psc_exp_g_model, + i_iaf_psc_exp_g_model, i_iaf_psc_exp_hc_model, i_iaf_psc_exp_model, i_iaf_psc_alpha_model, diff --git a/src/ngpu_exception.h b/src/ngpu_exception.h index 69573b61a..04a4d0df9 100644 --- a/src/ngpu_exception.h +++ b/src/ngpu_exception.h @@ -38,7 +38,7 @@ class ngpu_exception: public std::exception { const char *Message; // error message - + public: // constructors ngpu_exception(const char *ch) {Message=strdup(ch);} diff --git a/src/node_group.cu b/src/node_group.cu index 86d6edcbb..96626ddac 100644 --- a/src/node_group.cu +++ b/src/node_group.cu @@ -60,7 +60,7 @@ int NESTGPU::NodeGroupArrayInit() ngs.n_rec_spike_times_ = node_vect_[i]->n_rec_spike_times_; ngs.max_n_rec_spike_times_ = 
node_vect_[i]->max_n_rec_spike_times_; ngs.den_delay_arr_ = node_vect_[i]->den_delay_arr_; - + ngs_vect.push_back(ngs); } gpuErrchk(cudaMemcpyToSymbolAsync(NodeGroupArray, ngs_vect.data(), @@ -83,13 +83,13 @@ double *NESTGPU::InitGetSpikeArray (int n_node, int n_port) gpuErrchk(cudaMalloc(&d_get_spike_array, n_node*n_port *sizeof(double))); } - + return d_get_spike_array; } int NESTGPU::FreeNodeGroupMap() { gpuErrchk(cudaFree(d_node_group_map_)); - + return 0; } diff --git a/src/nota.txt b/src/nota.txt index 548062de2..de394a3d0 100644 --- a/src/nota.txt +++ b/src/nota.txt @@ -62,7 +62,7 @@ for (unsigned int target=0; target<n_spike_buffers; target++) { for (unsigned int target=0; target<n_spike_buffers; target++) { if (target.isSpikingNow()) { int n_target_rev_conn = TargetRevConnectionSize[target]; - + for (int i=0; i<n_target_rev_conn; i++) { unsigned int i_conn = TargetRevConnection[target][i]; unsigned char syn_group = ConnectionSynGroup[i_conn]; diff --git a/src/nota3.txt b/src/nota3.txt index f6cc92658..70673fd5e 100644 --- a/src/nota3.txt +++ b/src/nota3.txt @@ -12,4 +12,3 @@ scal_param_name_ = aeif_cond_alpha_scal_param_name; port_input_arr_ = GetVarArr() + n_scal_var_ + GetPortVarIdx("g1"); - diff --git a/src/parrot_neuron.cu b/src/parrot_neuron.cu index 39ec0d452..ff419bdbb 100644 --- a/src/parrot_neuron.cu +++ b/src/parrot_neuron.cu @@ -91,7 +91,7 @@ int parrot_neuron::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_var_ = N_PARROT_NEURON_SCAL_VAR; n_var_ = n_scal_var_; scal_var_name_ = parrot_neuron_scal_var_name; - + n_scal_param_ = N_PARROT_NEURON_SCAL_PARAM; n_param_ = n_scal_param_; scal_param_name_ = parrot_neuron_scal_param_name; @@ -117,7 +117,7 @@ int parrot_neuron::Init(int i_node_0, int n_node, int /*n_port*/, 2*sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 1; - + // input signal is stored in input_spike_height port_input_arr_ = GetVarArr() + GetScalVarIdx("input_spike_height"); port_input_arr_step_ = n_var_; @@ -141,7 +141,7 @@ int parrot_neuron::Update(long long /*i_time*/, double /*t1*/) int parrot_neuron::Free() { gpuErrchk(cudaFree(var_arr_)); - gpuErrchk(cudaFree(param_arr_)); + gpuErrchk(cudaFree(param_arr_)); return 0; } diff --git a/src/parrot_neuron.h b/src/parrot_neuron.h index 55aa0ce74..e9b01550d 100644 --- a/src/parrot_neuron.h +++ b/src/parrot_neuron.h @@ -59,10 +59,10 @@ Remarks - Weights on connections *from* the ``parrot_neuron`` are handled as usual. - Delays are honored on incoming and outgoing connections. -Only spikes arriving on connections to port (``receptor``) 0 will +Only spikes arriving on connections to port (``receptor``) 0 will be repeated. Connections onto port 1 will be accepted, but spikes incoming through port 1 will be ignored. This allows setting -exact pre- and postsynaptic spike times for STDP protocols by +exact pre- and postsynaptic spike times for STDP protocols by connecting two parrot neurons spiking at desired times by, e.g., a `stdp` onto port 1 on the postsynaptic parrot neuron. 
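The parrot-neuron documentation above describes the port convention that makes controlled STDP protocols possible: spikes delivered to port 0 are repeated, while spikes delivered to port 1 are accepted but not retransmitted, so an ``stdp`` connection onto port 1 sees exact pre- and postsynaptic spike times without disturbing the postsynaptic spike train. Below is a minimal sketch of that protocol using the C API declared in ``nestgpu_C.h``; only the function signatures come from this patch, while the model-name strings, the port count passed to ``NESTGPU_Create``, the assumption that ``NESTGPU_Create``/``NESTGPU_CreateSynGroup`` return the first node index and the synapse-group index, and the ``"spike_times"`` parameter name are assumptions made for illustration::

    #include "nestgpu_C.h"

    int main(void)
    {
      /* generators that define the desired pre- and postsynaptic spike times */
      int sg_pre  = NESTGPU_Create("spike_generator", 1, 1);
      int sg_post = NESTGPU_Create("spike_generator", 1, 1);
      float pre_times[]  = {10.0f, 40.0f};   /* ms, sorted, strictly in the future */
      float post_times[] = {12.0f, 38.0f};
      NESTGPU_SetNeuronPtArrayParam(&sg_pre,  1, "spike_times", pre_times,  2);
      NESTGPU_SetNeuronPtArrayParam(&sg_post, 1, "spike_times", post_times, 2);

      /* two parrot neurons that will repeat those spike trains */
      int parrot_pre  = NESTGPU_Create("parrot_neuron", 1, 2);
      int parrot_post = NESTGPU_Create("parrot_neuron", 1, 2);

      /* port 0: incoming spikes are repeated, so each parrot fires at its
         generator's spike times */
      NESTGPU_Connect(sg_pre,  parrot_pre,  0, 0, 1.0, 1.0);
      NESTGPU_Connect(sg_post, parrot_post, 0, 0, 1.0, 1.0);

      /* port 1: spikes are ignored by the post parrot, but the stdp synapse
         still sees the exact pre/post spike-time pairs and updates its weight */
      int i_stdp = NESTGPU_CreateSynGroup("stdp");
      NESTGPU_Connect(parrot_pre, parrot_post, 1, i_stdp, 1.0, 1.0);

      NESTGPU_Simulate();
      return 0;
    }

Driving the postsynaptic parrot only through port 0 keeps its output spike train fixed, so the weight change measured on the port-1 connection depends only on the chosen pre/post time differences.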
@@ -79,7 +79,7 @@ class parrot_neuron : public BaseNeuron unsigned long long *seed); int Free(); - + int Update(long long it, double t1); }; diff --git a/src/poiss_gen.cu b/src/poiss_gen.cu index 755715aaa..446d9c939 100644 --- a/src/poiss_gen.cu +++ b/src/poiss_gen.cu @@ -78,7 +78,7 @@ __global__ void PoissGenSendSpikeKernel(curandState *curand_state, double t, int i = port*NodeGroupArray[i_group].n_node_ + i_target - NodeGroupArray[i_group].i_node_0_; double d_val = (double)(weight*n); - atomicAddDouble(&NodeGroupArray[i_group].get_spike_array_[i], d_val); + atomicAddDouble(&NodeGroupArray[i_group].get_spike_array_[i], d_val); //////////////////////////////////////////////////////////////// } } @@ -95,14 +95,14 @@ int poiss_gen::Init(int i_node_0, int n_node, int /*n_port*/, n_param_ = n_scal_param_; scal_param_name_ = poiss_gen_scal_param_name; has_dir_conn_ = true; - + gpuErrchk(cudaMalloc(¶m_arr_, n_node_*n_param_*sizeof(float))); SetScalParam(0, n_node, "rate", 0.0); SetScalParam(0, n_node, "origin", 0.0); SetScalParam(0, n_node, "start", 0.0); SetScalParam(0, n_node, "stop", 1.0e30); - + return 0; } @@ -130,7 +130,7 @@ int poiss_gen::Calibrate(double, float) SetupPoissKernel<<<numBlocks, 1024>>>(d_curand_state_, n_dir_conn_, *seed_); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -143,7 +143,7 @@ int poiss_gen::Update(long long it, double t1) int poiss_gen::SendDirectSpikes(double t, float time_step) { unsigned int grid_dim_x, grid_dim_y; - + if (n_dir_conn_<65536*1024) { // max grid dim * max block dim grid_dim_x = (n_dir_conn_+1023)/1024; grid_dim_y = 1; @@ -162,7 +162,7 @@ int poiss_gen::SendDirectSpikes(double t, float time_step) PoissGenSendSpikeKernel<<<numBlocks, 1024>>>(d_curand_state_, t, time_step, param_arr_, n_param_, d_dir_conn_array_, n_dir_conn_); - + gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); diff --git a/src/poiss_gen.h b/src/poiss_gen.h index aae48549a..0f4b92615 100644 --- a/src/poiss_gen.h +++ b/src/poiss_gen.h @@ -81,12 +81,12 @@ class poiss_gen : public BaseNeuron { curandState *d_curand_state_; public: - + int Init(int i_node_0, int n_node, int n_port, int i_group, unsigned long long *seed); int Calibrate(double, float); - + int Update(long long it, double t1); int SendDirectSpikes(double t, float time_step); diff --git a/src/poisson.cu b/src/poisson.cu index eaea630cb..b022c5b6b 100644 --- a/src/poisson.cu +++ b/src/poisson.cu @@ -121,13 +121,13 @@ int PoissonGenerator::Create(curandGenerator_t *random_generator, i_node_0_ = i_node_0; n_node_ = n_node; lambda_ = lambda; - + n_steps_ = (buffer_size_ - 1)/n_node + 1; // with the above formula: // buffer_size <= n_node*n_steps <= buffer_size + n_node - 1 Init(random_generator, n_node_*n_steps_); i_step_ = 0; - + return 0; } @@ -141,7 +141,7 @@ int PoissonGenerator::Update(int max_n_steps) throw ngpu_exception("Step index larger than maximum number of steps " "in poisson generator"); } - + PoissonUpdate<<<1, 1>>>(&dev_poisson_data_[i_step_*n_node_]); gpuErrchk( cudaPeekAtLastError() ); diff --git a/src/poisson.h b/src/poisson.h index f1ad0205f..e5ccd595f 100644 --- a/src/poisson.h +++ b/src/poisson.h @@ -43,22 +43,22 @@ class PoissonGenerator int more_steps_; int i_node_0_; - + int Init(curandGenerator_t *random_generator, unsigned int n); public: curandGenerator_t *random_generator_; int n_node_; - + PoissonGenerator(); ~PoissonGenerator(); int Free(); - + int Create(curandGenerator_t *random_generator, int i_node_0, int n_node, float 
lambda); - + int Generate(); int Generate(int max_n_steps); diff --git a/src/prefix_scan.cu b/src/prefix_scan.cu index 541e9aa3d..aa1e237ee 100644 --- a/src/prefix_scan.cu +++ b/src/prefix_scan.cu @@ -35,7 +35,7 @@ int PrefixScan::Init() { //printf("Initializing CUDA-C scan...\n\n"); //initScan(); - + return 0; } @@ -51,6 +51,6 @@ int PrefixScan::Free() //closeScan(); //gpuErrchk(cudaFree(d_Output)); //gpuErrchk(cudaFree(d_Input)); - + return 0; } diff --git a/src/prefix_scan.h b/src/prefix_scan.h index 5caf9c721..3f681ac7d 100644 --- a/src/prefix_scan.h +++ b/src/prefix_scan.h @@ -43,7 +43,7 @@ class PrefixScan uint *h_OutputGPU; */ - + int Init(); int Scan(int *d_Output, int *d_Input, int n); diff --git a/src/pypath.m4 b/src/pypath.m4 index 5af6736d8..a4e62d14f 100644 --- a/src/pypath.m4 +++ b/src/pypath.m4 @@ -1,4 +1,4 @@ -AC_DEFUN([adl_CHECK_PYTHON], +AC_DEFUN([adl_CHECK_PYTHON], [AM_PATH_PYTHON([2.0]) AC_CACHE_CHECK([for $am_display_PYTHON includes directory], [adl_cv_python_inc], diff --git a/src/random.cu b/src/random.cu index aac64b44f..c580481d1 100644 --- a/src/random.cu +++ b/src/random.cu @@ -10,7 +10,7 @@ unsigned int *curand_int(curandGenerator_t &gen, size_t n) unsigned int *dev_data; // Allocate n integers on host unsigned int *host_data = new unsigned int[n]; - + // Allocate n integers on device CUDA_CALL(cudaMalloc((void **)&dev_data, n*sizeof(unsigned int))); // Create pseudo-random number generator @@ -23,7 +23,7 @@ unsigned int *curand_int(curandGenerator_t &gen, size_t n) cudaMemcpyDeviceToHost)); // Cleanup CUDA_CALL(cudaFree(dev_data)); - + return host_data; } @@ -32,7 +32,7 @@ float *curand_uniform(curandGenerator_t &gen, size_t n) float *dev_data; // Allocate n floats on host float *host_data = new float[n]; - + // Allocate n floats on device CUDA_CALL(cudaMalloc((void **)&dev_data, n*sizeof(float))); // Create pseudo-random number generator @@ -45,7 +45,7 @@ float *curand_uniform(curandGenerator_t &gen, size_t n) cudaMemcpyDeviceToHost)); // Cleanup CUDA_CALL(cudaFree(dev_data)); - + return host_data; } @@ -56,7 +56,7 @@ float *curand_normal(curandGenerator_t &gen, size_t n, float mean, float *dev_data; // Allocate n floats on host float *host_data = new float[n]; - + // Allocate n1 floats on device CUDA_CALL(cudaMalloc((void **)&dev_data, n1*sizeof(float))); // Create pseudo-random number generator @@ -71,7 +71,6 @@ float *curand_normal(curandGenerator_t &gen, size_t n, float mean, cudaMemcpyDeviceToHost)); // Cleanup CUDA_CALL(cudaFree(dev_data)); - + return host_data; } - diff --git a/src/rev_spike.cu b/src/rev_spike.cu index 86ff99eee..aee2b5f86 100644 --- a/src/rev_spike.cu +++ b/src/rev_spike.cu @@ -145,10 +145,10 @@ __global__ void RevSpikeReset() { *RevSpikeNum = 0; } - + int ResetConnectionSpikeTimeUp(NetConnection *net_connection) -{ +{ ResetConnectionSpikeTimeUpKernel <<<(net_connection->StoredNConnections()+1023)/1024, 1024>>> (net_connection->StoredNConnections()); @@ -158,7 +158,7 @@ int ResetConnectionSpikeTimeUp(NetConnection *net_connection) } int ResetConnectionSpikeTimeDown(NetConnection *net_connection) -{ +{ ResetConnectionSpikeTimeDownKernel <<<(net_connection->StoredNConnections()+1023)/1024, 1024>>> (net_connection->StoredNConnections()); @@ -170,14 +170,14 @@ int ResetConnectionSpikeTimeDown(NetConnection *net_connection) int RevSpikeInit(NetConnection *net_connection) { int n_spike_buffers = net_connection->connection_.size(); - + SetConnectionSpikeTime <<<(net_connection->StoredNConnections()+1023)/1024, 1024>>> 
(net_connection->StoredNConnections(), 0x8000); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk(cudaMalloc(&d_RevSpikeNum, sizeof(unsigned int))); - + gpuErrchk(cudaMalloc(&d_RevSpikeTarget, n_spike_buffers*sizeof(unsigned int))); gpuErrchk(cudaMalloc(&d_RevSpikeNConn, diff --git a/src/rk5.h b/src/rk5.h index 38790c082..856a3466b 100644 --- a/src/rk5.h +++ b/src/rk5.h @@ -84,19 +84,19 @@ void RK5Step(double &x, float *y, float &h, float h_min, float h_max, for (int i=0; i<NVAR; i++) { y_scal[i] = fabs(y[i]) + fabs(k1[i]*h) + scal_min; } - + float err; for(;;) { if (h > h_max) h = h_max; if (h < h_min) h = h_min; - + for (int i=0; i<NVAR; i++) { y_new[i] = y[i] + h*a21*k1[i]; } Derivatives<NVAR, NPARAM>(x+c2*h, y_new, k2, param, data_struct); - + for (int i=0; i<NVAR; i++) { y_new[i] = y[i] + h*(a31*k1[i] + a32*k2[i]); } @@ -108,23 +108,23 @@ void RK5Step(double &x, float *y, float &h, float h_min, float h_max, } Derivatives<NVAR, NPARAM>(x+c4*h, y_new, k4, param, data_struct); - + for (int i=0; i<NVAR; i++) { y_new[i] = y[i] + h*(a51*k1[i] + a52*k2[i] + a53*k3[i] + a54*k4[i]); } Derivatives<NVAR, NPARAM>(x+c5*h, y_new, k5, param, data_struct); - + for (int i=0; i<NVAR; i++) { y_new[i] = y[i] + h*(a61*k1[i] + a62*k2[i] + a63*k3[i] + a64*k4[i] + a65*k5[i]); } Derivatives<NVAR, NPARAM>(x+c6*h, y_new, k6, param, data_struct); - + for (int i=0; i<NVAR; i++) { y_new[i] = y[i] + h*(a71*k1[i] + a73*k3[i] + a74*k4[i] + a76*k6[i]); } - + err = 0.0; for (int i=0; i<NVAR; i++) { float val = h*(e1*k1[i] + e3*k3[i] + e4*k4[i] + e5*k5[i] + e6*k6[i]); @@ -136,14 +136,14 @@ void RK5Step(double &x, float *y, float &h, float h_min, float h_max, float h_new = h*coeff*pow(err,exp_dec); h = MAX(h_new, 0.1*h); - + //if (h <= h_min) { // h = h_min; //} //x_new = x + h; } - x += h; + x += h; if (err > err_min) { h = h*coeff*pow(err,exp_inc); @@ -151,7 +151,7 @@ void RK5Step(double &x, float *y, float &h, float h_min, float h_max, else { h = 5.0*h; } - + for (int i=0; i<NVAR; i++) { y[i] = y_new[i]; } @@ -201,7 +201,7 @@ void ArrayUpdate(int array_size, double *x_arr, float *h_arr, float *y_arr, y_arr[ArrayIdx*NVAR + i] = y[i]; } for(int j=0; j<NPARAM; j++) { - par_arr[ArrayIdx*NPARAM + j] = param[j]; + par_arr[ArrayIdx*NPARAM + j] = param[j]; } } } @@ -212,7 +212,7 @@ class RungeKutta5 int array_size_; int n_var_; int n_param_; - + double *d_XArr; float *d_HArr; float *d_YArr; @@ -221,7 +221,7 @@ class RungeKutta5 public: ~RungeKutta5(); - + double *GetXArr() {return d_XArr;} float *GetHArr() {return d_HArr;} float *GetYArr() {return d_YArr;} @@ -280,7 +280,7 @@ int RungeKutta5<DataStruct>::Init(int array_size, int n_var, int n_param, { array_size_ = array_size; n_var_ = n_var; - n_param_ = n_param; + n_param_ = n_param; gpuErrchk(cudaMalloc(&d_XArr, array_size_*sizeof(double))); gpuErrchk(cudaMalloc(&d_HArr, array_size_*sizeof(float))); @@ -292,7 +292,7 @@ int RungeKutta5<DataStruct>::Init(int array_size, int n_var, int n_param, x_min, h, data_struct); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -335,7 +335,7 @@ int RungeKutta5<DataStruct>::SetParam(int i_param, int i_array, int n_param, (&d_ParamArr[i_array*n_param_ + i_param], n_elem, n_param, val); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } diff --git a/src/scan.cu b/src/scan.cu index 937996eab..3ab420314 100644 --- a/src/scan.cu +++ b/src/scan.cu @@ -25,7 +25,7 @@ __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo int bankOffsetA = 
CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); - + if (threadID < n) { temp[ai + bankOffsetA] = input[ai]; temp[bi + bankOffsetB] = input[bi]; @@ -34,7 +34,7 @@ __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo temp[ai + bankOffsetA] = 0; temp[bi + bankOffsetB] = 0; } - + int offset = 1; for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree @@ -137,7 +137,7 @@ __global__ void prescan_large(int *output, int *input, int n, int *sums) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * n; - + int ai = threadID; int bi = threadID + (n / 2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); @@ -163,11 +163,11 @@ __global__ void prescan_large(int *output, int *input, int n, int *sums) { __syncthreads(); - if (threadID == 0) { + if (threadID == 0) { sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)]; temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0; - } - + } + for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; diff --git a/src/script.sh b/src/script.sh index 2256901c5..ede8bda24 100644 --- a/src/script.sh +++ b/src/script.sh @@ -11,7 +11,7 @@ for syn in cond_alpha cond_beta psc_alpha psc_exp psc_delta; do cat $fn | sed "s/aeif_${syn}/$um/g; s/AEIF${SYN}/$UM/g" > \ $fn1 done - + umf=user_m2_$syn um=user_m2 UM=USERM2 @@ -35,7 +35,7 @@ for syn in psc_exp psc_exp_g; do cat $fn | sed "s/iaf_${syn}/$um/g; s/IAF${SYN}/$UM/g" > \ $fn1 done - + umf=user_m2_iaf_$syn um=user_m2 UM=USERM2 @@ -44,7 +44,7 @@ for syn in psc_exp psc_exp_g; do echo "$fn $fn1" cat $fn | sed "s/iaf_${syn}/$um/g; s/IAF${SYN}/$UM/g" > \ $fn1 - done + done done /bin/cp user_m1_cond_beta.cu user_m1.cu @@ -56,4 +56,3 @@ done /bin/cp user_m2_cond_beta.h user_m2.h /bin/cp user_m2_cond_beta_kernel.h user_m2_kernel.h /bin/cp user_m2_cond_beta_rk5.h user_m2_rk5.h - diff --git a/src/spike_buffer.cu b/src/spike_buffer.cu index dc39e74ca..62515d9b6 100644 --- a/src/spike_buffer.cu +++ b/src/spike_buffer.cu @@ -184,7 +184,7 @@ __device__ void PushSpike(int i_spike_buffer, float height) // last time when spike is sent back to dendrites (e.g. 
for STDP) LastRevSpikeTimeIdx[i_spike_buffer] = NESTGPUTimeIdx; } - + #ifdef HAVE_MPI if (NESTGPUMpiFlag) { // if MPI is active spike should eventually be sent to remote connections @@ -248,7 +248,7 @@ __global__ void SpikeBufferUpdate() { int i_spike_buffer = threadIdx.x + blockIdx.x * blockDim.x; if (i_spike_buffer>=NSpikeBuffer) return; - + int i_group=NodeGroupMap[i_spike_buffer]; int den_delay_idx; float *den_delay_arr = NodeGroupArray[i_group].den_delay_arr_; @@ -334,12 +334,12 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) h_NSpikeBuffer = n_spike_buffers; int max_delay_num = net_connection->MaxDelayNum(); //printf("mdn: %d\n", max_delay_num); - + gpuErrchk(cudaMalloc(&d_LastSpikeTimeIdx, n_spike_buffers*sizeof(long long))); gpuErrchk(cudaMalloc(&d_LastSpikeHeight, n_spike_buffers*sizeof(float))); gpuErrchk(cudaMalloc(&d_LastRevSpikeTimeIdx, n_spike_buffers *sizeof(long long))); - + unsigned int n_conn = net_connection->StoredNConnections(); unsigned int *h_conn_target = new unsigned int[n_conn]; unsigned char *h_conn_syn_group = new unsigned char[n_conn]; @@ -395,7 +395,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) n_spike_buffers*max_delay_num *sizeof(unsigned short*))); } - + unsigned int i_conn = 0; for (unsigned int i_source=0; i_source<n_spike_buffers; i_source++) { std::vector<ConnGroup> *conn = &(net_connection->connection_[i_source]); @@ -428,7 +428,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) i_conn += n_target; } } - + cudaMemcpyAsync(d_conn_target, h_conn_target, n_conn*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpyAsync(d_ConnectionSynGroup, h_conn_syn_group, @@ -447,14 +447,14 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) rev_connections[target].push_back(i_conn); } } - + net_connection->SetNRevConnections(n_rev_conn); if (n_rev_conn>0) { unsigned int *h_rev_conn = new unsigned int[n_rev_conn]; int *h_target_rev_conn_size = new int[n_spike_buffers]; unsigned int **h_target_rev_conn = new unsigned int*[n_spike_buffers]; - + gpuErrchk(cudaMalloc(&d_RevConnections, n_rev_conn*sizeof(unsigned int))); gpuErrchk(cudaMalloc(&d_TargetRevConnectionSize, n_spike_buffers*sizeof(int))); @@ -482,7 +482,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) delete[] h_target_rev_conn_size; delete[] h_target_rev_conn; } - + cudaMemcpyAsync(d_ConnectionGroupSize, h_ConnectionGroupSize, n_spike_buffers*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyAsync(d_ConnectionGroupDelay, h_ConnectionGroupDelay, @@ -493,11 +493,11 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) cudaMemcpyAsync(d_ConnectionGroupTargetNode, h_ConnectionGroupTargetNode, n_spike_buffers*max_delay_num*sizeof(unsigned int*), cudaMemcpyHostToDevice); - + cudaMemcpyAsync(d_ConnectionGroupTargetSynGroup, h_ConnectionGroupTargetSynGroup, n_spike_buffers*max_delay_num*sizeof(unsigned char*), cudaMemcpyHostToDevice); - + cudaMemcpyAsync(d_ConnectionGroupTargetWeight, h_ConnectionGroupTargetWeight, n_spike_buffers*max_delay_num*sizeof(float*), cudaMemcpyHostToDevice); @@ -510,7 +510,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) DeviceSpikeBufferInit<<<1,1>>>(n_spike_buffers, max_delay_num, max_spike_buffer_size, - d_LastSpikeTimeIdx, d_LastSpikeHeight, + d_LastSpikeTimeIdx, d_LastSpikeHeight, d_ConnectionWeight, d_ConnectionSynGroup, d_ConnectionSpikeTime, 
d_ConnectionGroupSize, d_ConnectionGroupDelay, @@ -526,7 +526,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) d_TargetRevConnection, d_LastRevSpikeTimeIdx ); gpuErrchk( cudaPeekAtLastError() ); - + InitLastSpikeTimeIdx <<<(n_spike_buffers+1023)/1024, 1024>>> (n_spike_buffers, LAST_SPIKE_TIME_GUARD); @@ -548,7 +548,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) if(h_ConnectionGroupTargetSpikeTime != NULL) { delete[] h_ConnectionGroupTargetSpikeTime; } - + return 0; } @@ -558,7 +558,7 @@ __global__ void DeviceSpikeBufferInit(int n_spike_buffers, int max_delay_num, float *last_spike_height, float *conn_weight, unsigned char *conn_syn_group, - unsigned short *conn_spike_time, + unsigned short *conn_spike_time, int *conn_group_size, int *conn_group_delay, int *conn_group_target_size, unsigned int **conn_group_target_node, diff --git a/src/spike_buffer.cu.commented b/src/spike_buffer.cu.commented index 04c0fb461..7039357e8 100644 --- a/src/spike_buffer.cu.commented +++ b/src/spike_buffer.cu.commented @@ -165,7 +165,7 @@ __device__ void PushSpike(int i_spike_buffer, float height) // last time when spike is sent back to dendrites (e.g. for STDP) LastRevSpikeTimeIdx[i_spike_buffer] = NESTGPUTimeIdx; } - + #ifdef HAVE_MPI if (NESTGPUMpiFlag) { // if MPI is active spike should eventually be sent to remote connections @@ -200,14 +200,14 @@ __device__ void PushSpike(int i_spike_buffer, float height) // spike should be stored if there are output connections // or if dendritic delay is > 0 if (ConnectionGroupSize[i_spike_buffer]>0 || den_delay_idx>0) { - int Ns = SpikeBufferSize[i_spike_buffer]; // n. of spikes in buffer + int Ns = SpikeBufferSize[i_spike_buffer]; // n. of spikes in buffer if (Ns>=MaxSpikeBufferSize) { printf("Maximum number of spikes in spike buffer exceeded" " for spike buffer %d\n", i_spike_buffer); //exit(0); return; } - + /////////////////////////////////// // push_front new spike in buffer ////////////////////////////////// @@ -215,7 +215,7 @@ __device__ void PushSpike(int i_spike_buffer, float height) for (int is=Ns; is>0; is--) { int i_arr = is*NSpikeBuffer+i_spike_buffer; // spike index in array int i_prev_arr = i_arr - NSpikeBuffer; // previous spike index in array - // shift all buffer content by one position + // shift all buffer content by one position SpikeBufferTimeIdx[i_arr] = SpikeBufferTimeIdx[i_prev_arr]; SpikeBufferConnIdx[i_arr] = @@ -236,17 +236,17 @@ __global__ void SpikeBufferUpdate() { int i_spike_buffer = threadIdx.x + blockIdx.x * blockDim.x; if (i_spike_buffer>=NSpikeBuffer) return; - + int i_group=NodeGroupMap[i_spike_buffer]; int den_delay_idx; float *den_delay_arr = NodeGroupArray[i_group].den_delay_arr_; - // check if node has dendritic delay + // check if node has dendritic delay if (den_delay_arr != NULL) { int i_neuron = i_spike_buffer - NodeGroupArray[i_group].i_node_0_; int n_param = NodeGroupArray[i_group].n_param_; // dendritic delay index is stored in the parameter array // den_delay_arr points to the dendritic delay if the first - // node of the group. The other are separate by steps = n_param + // node of the group. The other are separate by steps = n_param den_delay_idx = (int)round(den_delay_arr[i_neuron*n_param] /NESTGPUTimeResolution); //printf("isb update %d\tden_delay_idx: %d\n", i_spike_buffer, den_delay_idx); @@ -257,7 +257,7 @@ __global__ void SpikeBufferUpdate() // flag for sending spikes back through dendrites (e.g. 
for STDP) bool rev_spike = false; - + int Ns = SpikeBufferSize[i_spike_buffer]; // n. of spikes in buffer for (int is=0; is<Ns; is++) { int i_arr = is*NSpikeBuffer+i_spike_buffer; // spike index in array @@ -321,11 +321,11 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) unsigned int n_spike_buffers = net_connection->connection_.size(); int max_delay_num = net_connection->MaxDelayNum(); //printf("mdn: %d\n", max_delay_num); - + gpuErrchk(cudaMalloc(&d_LastSpikeTimeIdx, n_spike_buffers*sizeof(int))); gpuErrchk(cudaMalloc(&d_LastSpikeHeight, n_spike_buffers*sizeof(float))); gpuErrchk(cudaMalloc(&d_LastRevSpikeTimeIdx, n_spike_buffers*sizeof(int))); - + unsigned int n_conn = net_connection->StoredNConnections(); unsigned int *h_conn_target = new unsigned int[n_conn]; unsigned char *h_conn_syn_group = new unsigned char[n_conn]; @@ -379,7 +379,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) n_spike_buffers*max_delay_num *sizeof(unsigned short*))); } - + unsigned int i_conn = 0; for (unsigned int i_source=0; i_source<n_spike_buffers; i_source++) { std::vector<ConnGroup> *conn = &(net_connection->connection_[i_source]); @@ -412,7 +412,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) i_conn += n_target; } } - + cudaMemcpy(d_conn_target, h_conn_target, n_conn*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(d_ConnectionSynGroup, h_conn_syn_group, @@ -432,17 +432,17 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) rev_connections[target].push_back(i_conn); } } - + delete[] h_conn_target; delete[] h_conn_syn_group; - + net_connection->SetNRevConnections(n_rev_conn); if (n_rev_conn>0) { unsigned int *h_rev_conn = new unsigned int[n_rev_conn]; int *h_target_rev_conn_size = new int[n_spike_buffers]; unsigned int **h_target_rev_conn = new unsigned int*[n_spike_buffers]; - + gpuErrchk(cudaMalloc(&d_RevConnections, n_rev_conn*sizeof(unsigned int))); gpuErrchk(cudaMalloc(&d_TargetRevConnectionSize, n_spike_buffers*sizeof(int))); @@ -465,12 +465,12 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) n_spike_buffers*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_TargetRevConnection, h_target_rev_conn, n_spike_buffers*sizeof(unsigned int*), cudaMemcpyHostToDevice); - + delete[] h_rev_conn; delete[] h_target_rev_conn_size; delete[] h_target_rev_conn; } - + cudaMemcpy(d_ConnectionGroupSize, h_ConnectionGroupSize, n_spike_buffers*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_ConnectionGroupDelay, h_ConnectionGroupDelay, @@ -481,11 +481,11 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) cudaMemcpy(d_ConnectionGroupTargetNode, h_ConnectionGroupTargetNode, n_spike_buffers*max_delay_num*sizeof(unsigned int*), cudaMemcpyHostToDevice); - + cudaMemcpy(d_ConnectionGroupTargetSynGroup, h_ConnectionGroupTargetSynGroup, n_spike_buffers*max_delay_num*sizeof(unsigned char*), cudaMemcpyHostToDevice); - + cudaMemcpy(d_ConnectionGroupTargetWeight, h_ConnectionGroupTargetWeight, n_spike_buffers*max_delay_num*sizeof(float*), cudaMemcpyHostToDevice); @@ -498,7 +498,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) DeviceSpikeBufferInit<<<1,1>>>(n_spike_buffers, max_delay_num, max_spike_buffer_size, - d_LastSpikeTimeIdx, d_LastSpikeHeight, + d_LastSpikeTimeIdx, d_LastSpikeHeight, d_ConnectionWeight, d_ConnectionSynGroup, d_ConnectionSpikeTime, d_ConnectionGroupSize, d_ConnectionGroupDelay, 
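The commented copy of ``spike_buffer.cu`` patched above also documents the memory layout of the spike queues: all spike buffers share one flat array, interleaved so that slot ``is`` of buffer ``i_spike_buffer`` lives at index ``is*NSpikeBuffer + i_spike_buffer``, and ``PushSpike`` shifts a buffer's existing entries up by one stride before writing the new spike into slot 0. A host-side toy version of that indexing and push-front shift follows (plain C, with the device globals turned into function parameters; illustrative only, not code from the patch)::

    #include <stddef.h>

    /* flat index of spike slot "is" of buffer "i_buffer" in the
       buffer-interleaved array used by spike_buffer.cu */
    static size_t slot(size_t is, size_t i_buffer, size_t n_spike_buffer)
    {
      return is * n_spike_buffer + i_buffer;
    }

    /* push-front: shift the Ns stored entries of one buffer up by one slot
       (stride n_spike_buffer) and store the new entry at slot 0 */
    static void push_front(int *time_idx, size_t i_buffer, size_t n_spike_buffer,
                           int Ns, int new_time_idx)
    {
      for (int is = Ns; is > 0; is--) {
        time_idx[slot((size_t)is, i_buffer, n_spike_buffer)] =
          time_idx[slot((size_t)(is - 1), i_buffer, n_spike_buffer)];
      }
      time_idx[slot(0, i_buffer, n_spike_buffer)] = new_time_idx;
    }

    int main(void)
    {
      enum { N_BUF = 4, MAX_SPIKES = 8 };
      int time_idx[N_BUF * MAX_SPIKES] = {0};
      /* push two spikes into buffer 2; the first one moves to slot 1 */
      push_front(time_idx, 2, N_BUF, 0, 100);
      push_front(time_idx, 2, N_BUF, 1, 101);
      return time_idx[slot(1, 2, N_BUF)] == 100 ? 0 : 1;
    }

Interleaving by buffer index presumably keeps the accesses of consecutive GPU threads (one per spike buffer) adjacent in memory, which would explain why the stride is ``NSpikeBuffer`` rather than the per-buffer capacity.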
@@ -514,7 +514,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + InitLastSpikeTimeIdx <<<(n_spike_buffers+1023)/1024, 1024>>> (n_spike_buffers, LAST_SPIKE_TIME_GUARD); @@ -531,7 +531,7 @@ int SpikeBufferInit(NetConnection *net_connection, int max_spike_buffer_size) if(h_ConnectionGroupTargetSpikeTime != NULL) { delete[] h_ConnectionGroupTargetSpikeTime; } - + return 0; } @@ -541,7 +541,7 @@ __global__ void DeviceSpikeBufferInit(int n_spike_buffers, int max_delay_num, float *last_spike_height, float *conn_weight, unsigned char *conn_syn_group, - unsigned short *conn_spike_time, + unsigned short *conn_spike_time, int *conn_group_size, int *conn_group_delay, int *conn_group_target_size, unsigned int **conn_group_target_node, @@ -580,4 +580,3 @@ __global__ void DeviceSpikeBufferInit(int n_spike_buffers, int max_delay_num, TargetRevConnection = target_rev_conn; LastRevSpikeTimeIdx = last_rev_spike_time_idx; } - diff --git a/src/spike_buffer.h b/src/spike_buffer.h index f5e616a86..ea519fd57 100644 --- a/src/spike_buffer.h +++ b/src/spike_buffer.h @@ -127,7 +127,7 @@ __global__ void DeviceSpikeBufferInit(int n_spike_buffers, int max_delay_num, float *last_spike_height, float *conn_weight, unsigned char *conn_syn_group, - unsigned short *conn_spike_time, + unsigned short *conn_spike_time, int *conn_group_size, int *conn_group_delay, int *conn_group_target_size, unsigned int **conn_group_target_node, diff --git a/src/spike_detector.cu b/src/spike_detector.cu index 237aafc2f..a0f9ea811 100644 --- a/src/spike_detector.cu +++ b/src/spike_detector.cu @@ -89,7 +89,7 @@ int spike_detector::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_var_ = N_SPIKE_DETECTOR_SCAL_VAR; n_var_ = n_scal_var_; scal_var_name_ = spike_detector_scal_var_name; - + n_scal_param_ = N_SPIKE_DETECTOR_SCAL_PARAM; n_param_ = n_scal_param_; scal_param_name_ = spike_detector_scal_param_name; @@ -111,12 +111,12 @@ int spike_detector::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input signal is stored in input_spike_height port_input_arr_ = GetVarArr() + GetScalVarIdx("input_spike_height"); port_input_arr_step_ = n_var_; port_input_port_step_ = n_port_var_; - + return 0; } @@ -133,7 +133,7 @@ int spike_detector::Update(long long /*i_time*/, double /*t1*/) int spike_detector::Free() { gpuErrchk(cudaFree(var_arr_)); - gpuErrchk(cudaFree(param_arr_)); + gpuErrchk(cudaFree(param_arr_)); return 0; } diff --git a/src/spike_detector.h b/src/spike_detector.h index 047366ac8..e5e30e457 100644 --- a/src/spike_detector.h +++ b/src/spike_detector.h @@ -46,7 +46,7 @@ The ``spike_detector`` collects and records all spikes it receives from neurons that are connected to it. Any neuron from which spikes have to be recorded must be connected to -the spike recorder using the standard ``Connect`` command. +the spike recorder using the standard ``Connect`` command. .. warning:: @@ -70,17 +70,17 @@ Here follows an example: recorder = nestgpu.CreateRecord("", ["spike_height"], [spike_det[0]], [0]) nestgpu.Simulate() - + recorded_data = nestgpu.GetRecordData(record) time = [row[0] for row in recorded_data] spike_height = [row[1] for row in recorded_data] The output is thus a continuous variable, which is 0 when no spikes are emitted -by the neuron, and is ``weights`` when a spike is emitted. 
+by the neuron, and is ``weights`` when a spike is emitted. .. note:: - A faster implementation for spike recording, which is also similar to + A faster implementation for spike recording, which is also similar to the one of NEST in terms of output, is described in the guide of :doc:`how to record spikes <../guides/how_to_record_spikes>`. @@ -101,7 +101,7 @@ class spike_detector : public BaseNeuron unsigned long long *seed); int Free(); - + int Update(long long it, double t1); }; diff --git a/src/spike_generator.cu b/src/spike_generator.cu index 145989519..7e1bd5018 100644 --- a/src/spike_generator.cu +++ b/src/spike_generator.cu @@ -77,37 +77,37 @@ int spike_generator::Init(int i_node_0, int n_node, int /*n_port*/, for (int i=0; i<N_SPIKE_GEN_ARRAY_PARAM; i++) { array_param_name_.push_back(spike_gen_array_param_name[i]); - } + } std::vector<float> empty_vect; spike_time_vect_.clear(); spike_time_vect_.insert(spike_time_vect_.begin(), n_node, empty_vect); spike_height_vect_.clear(); spike_height_vect_.insert(spike_height_vect_.begin(), n_node, empty_vect); - + gpuErrchk(cudaMalloc(¶m_arr_, n_node_*n_param_*sizeof(float))); //SetScalParam(0, n_node, "origin", 0.0); - + h_spike_time_idx_ = new int*[n_node_]; h_spike_height_ = new float*[n_node_]; for (int i_node=0; i_node<n_node_; i_node++) { h_spike_time_idx_[i_node] = 0; h_spike_height_[i_node] = 0; } - + gpuErrchk(cudaMalloc(&d_n_spikes_, n_node_*sizeof(int))); gpuErrchk(cudaMalloc(&d_i_spike_, n_node_*sizeof(int))); gpuErrchk(cudaMalloc(&d_spike_time_idx_, n_node_*sizeof(int*))); gpuErrchk(cudaMalloc(&d_spike_height_, n_node_*sizeof(float*))); - + gpuErrchk(cudaMemset(d_n_spikes_, 0, n_node_*sizeof(int))); gpuErrchk(cudaMemset(d_i_spike_, 0, n_node_*sizeof(int))); gpuErrchk(cudaMemset(d_spike_time_idx_, 0, n_node_*sizeof(int*))); gpuErrchk(cudaMemset(d_spike_height_, 0, n_node_*sizeof(float*))); - + gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } @@ -119,15 +119,15 @@ int spike_generator::Free() gpuErrchk(cudaFree(h_spike_time_idx_[i_node])); gpuErrchk(cudaFree(h_spike_height_[i_node])); } - } + } gpuErrchk(cudaFree(d_n_spikes_)); - gpuErrchk(cudaFree(d_i_spike_)); + gpuErrchk(cudaFree(d_i_spike_)); gpuErrchk(cudaFree(d_spike_time_idx_)); gpuErrchk(cudaFree(d_spike_height_)); delete[] h_spike_time_idx_; delete[] h_spike_height_; - + return 0; } @@ -173,7 +173,7 @@ int spike_generator::SetArrayParam(int i_neuron, int n_neuron, return 0; } - + int spike_generator::SetArrayParam(int *i_neuron, int n_neuron, std::string param_name, float *array, int array_size) @@ -188,7 +188,7 @@ int spike_generator::SetArrayParam(int *i_neuron, int n_neuron, else if (param_name==array_param_name_[i_SPIKE_HEIGHT_ARRAY_PARAM]) { for (int i=0; i<n_neuron; i++) { int in = i_neuron[i]; - CheckNeuronIdx(in); + CheckNeuronIdx(in); spike_height_vect_[in] = std::vector<float>(array, array+array_size); } } @@ -217,10 +217,10 @@ int spike_generator::Calibrate(double time_min, float time_resolution) spike_height_vect_[in].data(), (float)time_min, time_resolution); } } - + return 0; } - + int spike_generator::SetSpikes(int irel_node, int n_spikes, float *spike_time, @@ -231,7 +231,7 @@ int spike_generator::SetSpikes(int irel_node, int n_spikes, float *spike_time, throw ngpu_exception("Number of spikes must be greater than 0 " "in spike generator setting"); } - + cudaMemcpy(&d_n_spikes_[irel_node], &n_spikes, sizeof(int), cudaMemcpyHostToDevice); if (h_spike_time_idx_[irel_node] != 0) { @@ -257,9 +257,9 @@ int 
spike_generator::SetSpikes(int irel_node, int n_spikes, float *spike_time, } //cout << "ti " << spike_time_idx[i] << endl; //cout << spike_time[i] << " " << time_min << endl; - + } - + cudaMemcpy(h_spike_time_idx_[irel_node], spike_time_idx, n_spikes*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(h_spike_height_[irel_node], spike_height, n_spikes*sizeof(float), diff --git a/src/spike_generator.h b/src/spike_generator.h index e4ad9a61d..4aa828269 100644 --- a/src/spike_generator.h +++ b/src/spike_generator.h @@ -58,7 +58,7 @@ The following parameters can be set in the status dictionary. Spike times are given in milliseconds, and must be sorted with the earliest spike first. All spike times must be strictly in the future -(i.e. greater than the current time step). +(i.e. greater than the current time step). It is possible that spike times do not coincide with a time step, i.e., are not a multiple of the simulation resolution. @@ -83,7 +83,7 @@ EndUserDocs class spike_generator : public BaseNeuron { int *d_n_spikes_; - int *d_i_spike_; + int *d_i_spike_; int **d_spike_time_idx_; float **d_spike_height_; int **h_spike_time_idx_; @@ -93,25 +93,25 @@ class spike_generator : public BaseNeuron int SetSpikes(int irel_node, int n_spikes, float *spike_time, float *spike_height, float time_min, float time_resolution); - + public: ~spike_generator(); - + int Init(int i_node_0, int n_node, int n_port, int i_group, unsigned long long *seed); int Free(); - + int Update(long long i_time, double t1); int Calibrate(double time_min, float time_resolution); int SetArrayParam(int i_neuron, int n_neuron, std::string param_name, float *array, int array_size); - + int SetArrayParam(int *i_neuron, int n_neuron, std::string param_name, float *array, int array_size); - + int GetArrayParamSize(int i_neuron, std::string param_name); float *GetArrayParam(int i_neuron, std::string param_name); diff --git a/src/spike_mpi.cu b/src/spike_mpi.cu index babe29c01..8cb3fd0c8 100644 --- a/src/spike_mpi.cu +++ b/src/spike_mpi.cu @@ -132,7 +132,7 @@ __global__ void SendExternalSpike() int i_source = ExternalSpikeSourceNode[i_spike]; float height = ExternalSpikeHeight[i_spike]; int Nth = NExternalNodeTargetHost[i_source]; - + for (int ith=0; ith<Nth; ith++) { int target_host_id = ExternalNodeTargetHostId[i_source][ith]; int remote_node_id = ExternalNodeId[i_source][ith]; @@ -166,7 +166,7 @@ int ConnectMpi::ExternalSpikeInit(int n_node, int n_hosts, int max_spike_per_hos int *h_NExternalNodeTargetHost = new int[n_node]; int **h_ExternalNodeTargetHostId = new int*[n_node]; int **h_ExternalNodeId = new int*[n_node]; - + //h_ExternalSpikeNodeId = new int[max_spike_per_host]; h_ExternalTargetSpikeNum = new int [n_hosts]; h_ExternalTargetSpikeCumul = new int[n_hosts+1]; @@ -177,7 +177,7 @@ int ConnectMpi::ExternalSpikeInit(int n_node, int n_hosts, int max_spike_per_hos h_ExternalSpikeHeight = new float[max_spike_per_host]; recv_mpi_request = new MPI_Request[n_hosts]; - + gpuErrchk(cudaMalloc(&d_ExternalSpikeNum, sizeof(int))); gpuErrchk(cudaMalloc(&d_ExternalSpikeSourceNode, max_spike_per_host*sizeof(int))); @@ -203,7 +203,7 @@ int ConnectMpi::ExternalSpikeInit(int n_node, int n_hosts, int max_spike_per_hos gpuErrchk(cudaMalloc(&d_NExternalNodeTargetHost, n_node*sizeof(int))); gpuErrchk(cudaMalloc(&d_ExternalNodeTargetHostId, n_node*sizeof(int*))); gpuErrchk(cudaMalloc(&d_ExternalNodeId, n_node*sizeof(int*))); - + for (int i_source=0; i_source<n_node; i_source++) { std::vector< ExternalConnectionNode > *conn = 
&extern_connection_[i_source]; int Nth = conn->size(); @@ -267,7 +267,7 @@ __global__ void DeviceExternalSpikeInit(int n_hosts, int **ext_node_target_host_id, int **ext_node_id ) - + { NExternalTargetHost = n_hosts; MaxSpikePerHost = max_spike_per_host; @@ -283,7 +283,7 @@ __global__ void DeviceExternalSpikeInit(int n_hosts, *ExternalSpikeNum = 0; for (int ith=0; ith<NExternalTargetHost; ith++) { ExternalTargetSpikeNum[ith] = 0; - } + } } @@ -320,7 +320,7 @@ int ConnectMpi::SendSpikeToRemote(int n_hosts, int max_spike_per_host) int array_idx = h_ExternalTargetSpikeCumul[ih]; int n_spikes = h_ExternalTargetSpikeCumul[ih+1] - array_idx; //printf("MPI_Send (src,tgt,nspike): %d %d %d\n", mpi_id, ih, n_spike); - + // nonblocking sent of spike packet to MPI proc ih MPI_Isend(&h_ExternalTargetSpikeNodeId[array_idx], n_spikes, MPI_INT, ih, tag, MPI_COMM_WORLD, &request); @@ -331,13 +331,13 @@ int ConnectMpi::SendSpikeToRemote(int n_hosts, int max_spike_per_host) // h_ExternalTargetSpikeNodeId[array_idx]); } SendSpikeToRemote_MPI_time_ += (getRealTime() - time_mark); - + return 0; } // Receive spikes from remote MPI processes int ConnectMpi::RecvSpikeFromRemote(int n_hosts, int max_spike_per_host) - + { int tag = 1; @@ -367,7 +367,7 @@ int ConnectMpi::RecvSpikeFromRemote(int n_hosts, int max_spike_per_host) } RecvSpikeFromRemote_MPI_time_ += (getRealTime() - time_mark); - + return 0; } @@ -399,7 +399,7 @@ int ConnectMpi::CopySpikeFromRemote(int n_hosts, int max_spike_per_host, n_spike_tot*sizeof(int), cudaMemcpyHostToDevice)); RecvSpikeFromRemote_CUDAcp_time_ += (getRealTime() - time_mark); // convert node group indexes to spike buffer indexes - // by adding the index of the first node of the node group + // by adding the index of the first node of the node group //AddOffset<<<(n_spike_tot+1023)/1024, 1024>>> // (n_spike_tot, d_ExternalSourceSpikeNodeId, i_remote_node_0); //gpuErrchk( cudaPeekAtLastError() ); @@ -410,7 +410,7 @@ int ConnectMpi::CopySpikeFromRemote(int n_hosts, int max_spike_per_host, gpuErrchk( cudaPeekAtLastError() ); //cudaDeviceSynchronize(); } - + return n_spike_tot; } @@ -469,7 +469,7 @@ int ConnectMpi::JoinSpikes(int n_hosts, int max_spike_per_host) } JoinSpike_time_ += (getRealTime() - time_mark); - + return n_spike_tot; } diff --git a/src/stdp.cu b/src/stdp.cu index 36a2223a8..7a59c7c9f 100644 --- a/src/stdp.cu +++ b/src/stdp.cu @@ -56,7 +56,7 @@ __device__ void STDPUpdate(float *weight_pt, float Dt, float *param) double fact = -alpha*lambda*exp((double)Dt/tau_minus); w1 = w + fact*Wmax*pow(w/Wmax, mu_minus); } - + w1 = w1 >0.0 ? w1 : 0.0; w1 = w1 < Wmax ? w1 : Wmax; *weight_pt = (float)w1; diff --git a/src/stdp.h b/src/stdp.h index 97c568b42..c140b19d7 100644 --- a/src/stdp.h +++ b/src/stdp.h @@ -41,7 +41,7 @@ Description The STDP class is a type of synapse model used to create synapses that enable spike timing dependent plasticity -(as defined in [1]_). +(as defined in [1]_). Here the weight dependence exponent can be set separately for potentiation and depression. 
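The ``STDPUpdate`` kernel patched in ``stdp.cu`` above implements the depression branch of the power-law STDP rule described in the ``stdp.h`` documentation that follows it (the weight-dependence exponent is separate for potentiation and depression). Written out, the update visible in that hunk for a pre-after-post pair (negative :math:`\Delta t`, the branch using :math:`\tau_-` and :math:`\mu_-`) is

.. math::

   w \;\leftarrow\; \min\!\left(W_{\max},\; \max\!\left(0,\; w \;-\; \alpha\,\lambda\, W_{\max} \left(\frac{w}{W_{\max}}\right)^{\mu_-} e^{\,\Delta t/\tau_-}\right)\right)

with :math:`\alpha`, :math:`\lambda`, :math:`\tau_-`, :math:`\mu_-` and :math:`W_{\max}` taken from the synapse-group parameters; the clamping to :math:`[0, W_{\max}]` corresponds to the two ternary expressions at the end of the kernel. The potentiation branch is not part of the hunk shown here, so it is not reproduced.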
diff --git a/src/syn_model.cu b/src/syn_model.cu index 797e76c6f..561a06e56 100644 --- a/src/syn_model.cu +++ b/src/syn_model.cu @@ -63,7 +63,7 @@ __global__ void SynGroupInit(int *syn_group_type_map, { SynGroupTypeMap = syn_group_type_map; SynGroupParamMap = syn_group_param_map; - + } int SynModel::GetNParam() @@ -77,7 +77,7 @@ std::vector<std::string> SynModel::GetParamNames() for (int i=0; i<n_param_; i++) { param_name_vect.push_back(param_name_[i]); } - + return param_name_vect; } @@ -100,7 +100,7 @@ int SynModel::GetParamIdx(std::string param_name) throw ngpu_exception(std::string("Unrecognized parameter ") + param_name); } - + return i_param; } @@ -132,7 +132,7 @@ int SynModel::SetParam(std::string param_name, float val) return 0; } - + int NESTGPU::CreateSynGroup(std::string model_name) { CheckUncalibrated("Nodes cannot be created after calibration"); @@ -230,7 +230,7 @@ int NESTGPU::SynGroupCalibrate() SynGroupInit<<<1,1>>>(d_SynGroupTypeMap, d_SynGroupParamMap); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); - + delete[] h_SynGroupTypeMap; delete[] h_SynGroupParamMap; diff --git a/src/user_m1.cu b/src/user_m1.cu index 77c3056ce..a2631d629 100644 --- a/src/user_m1.cu +++ b/src/user_m1.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -99,7 +99,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m1_rk5 data_struct) @@ -143,7 +143,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -167,7 +167,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1.cu.bk b/src/user_m1.cu.bk index c0618dec1..0286f67b7 100644 --- a/src/user_m1.cu.bk +++ b/src/user_m1.cu.bk @@ -42,7 +42,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -87,7 +87,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m1_rk5 data_struct) @@ -108,14 +108,14 @@ using namespace user_m1_ns; int user_m1::Init(int i_node_0, int n_node, int n_port, int i_group, unsigned long long *seed) { BaseNeuron::Init(i_node_0, n_node, n_port, i_group, seed); - + node_type_ = i_user_m1_model; n_scal_var_ = N_SCAL_VAR; n_port_var_ = N_PORT_VAR; n_scal_param_ = N_SCAL_PARAM; n_port_param_ = N_PORT_PARAM; n_group_param_ = N_GROUP_PARAM; - + n_var_ = n_scal_var_ + n_port_var_*n_port; n_param_ = n_scal_param_ + n_port_param_*n_port; @@ -156,7 +156,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } @@ -171,4 +171,3 @@ int user_m1::Update(long long it, double t1) { return 0; } - diff --git a/src/user_m1.h b/src/user_m1.h index f2192726c..4146ffa2f 100644 --- a/src/user_m1.h +++ b/src/user_m1.h @@ -49,22 +49,22 @@ 
class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1.h.bk b/src/user_m1.h.bk index b6df4fca8..ec3d94d3f 100644 --- a/src/user_m1.h.bk +++ b/src/user_m1.h.bk @@ -37,22 +37,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1_cond_alpha.cu b/src/user_m1_cond_alpha.cu index 04daec8f7..56775bf51 100644 --- a/src/user_m1_cond_alpha.cu +++ b/src/user_m1_cond_alpha.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -81,7 +81,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m1_rk5 data_struct) @@ -113,7 +113,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m1_scal_var_name; port_var_name_= user_m1_port_var_name; scal_param_name_ = user_m1_scal_param_name; @@ -125,7 +125,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -149,7 +149,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1_cond_alpha.h b/src/user_m1_cond_alpha.h index 54f4fc07b..441ba5e19 100644 --- a/src/user_m1_cond_alpha.h +++ b/src/user_m1_cond_alpha.h @@ -49,22 +49,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1_cond_alpha_kernel.h b/src/user_m1_cond_alpha_kernel.h index 15f0d8cf5..19967d8d6 100644 --- a/src/user_m1_cond_alpha_kernel.h +++ b/src/user_m1_cond_alpha_kernel.h @@ -112,7 +112,7 @@ const std::string 
user_m1_scal_param_name[N_SCAL_PARAM] = { const std::string user_m1_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_syn", - "g0" + "g0" }; const std::string user_m1_group_param_name[N_GROUP_PARAM] = { @@ -157,7 +157,7 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m1_cond_beta.cu b/src/user_m1_cond_beta.cu index a621a65b0..cef66fd2c 100644 --- a/src/user_m1_cond_beta.cu +++ b/src/user_m1_cond_beta.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -99,7 +99,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m1_rk5 data_struct) @@ -143,7 +143,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -167,7 +167,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1_cond_beta.h b/src/user_m1_cond_beta.h index 87cb0e83b..50614d0ee 100644 --- a/src/user_m1_cond_beta.h +++ b/src/user_m1_cond_beta.h @@ -49,22 +49,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1_cond_beta_kernel.h b/src/user_m1_cond_beta_kernel.h index 2d607b4a8..52abd6132 100644 --- a/src/user_m1_cond_beta_kernel.h +++ b/src/user_m1_cond_beta_kernel.h @@ -115,7 +115,7 @@ const std::string user_m1_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; const std::string user_m1_group_param_name[N_GROUP_PARAM] = { @@ -161,7 +161,7 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m1_iaf_psc_exp.cu b/src/user_m1_iaf_psc_exp.cu index a0bb45bc0..e93ff6d0e 100644 --- a/src/user_m1_iaf_psc_exp.cu +++ b/src/user_m1_iaf_psc_exp.cu @@ -70,12 +70,12 @@ __global__ void user_m1_Calibrate(int n_node, float *param_arr, int i_neuron = threadIdx.x + blockIdx.x * blockDim.x; if (i_neuron<n_node) { float *param = param_arr + n_param*i_neuron; - + P11ex = exp( -h / tau_ex ); P11in = exp( -h / tau_in ); P22 = exp( -h / tau_m ); P21ex = (float)propagator_32( tau_ex, tau_m, C_m, h ); - P21in = (float)propagator_32( tau_in, tau_m, C_m, h ); + P21in = (float)propagator_32( tau_in, 
tau_m, C_m, h ); P20 = tau_m / C_m * ( 1.0 - P22 ); } } @@ -88,7 +88,7 @@ __global__ void user_m1_Update(int n_node, int i_node_0, float *var_arr, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -99,12 +99,12 @@ __global__ void user_m1_Update(int n_node, int i_node_0, float *var_arr, // exponential decaying PSCs I_syn_ex *= P11ex; I_syn_in *= P11in; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = (int)round(t_ref/NESTGPUTimeResolution); - } + } } } @@ -124,7 +124,7 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -162,14 +162,14 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn_ex, I_syn_in port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn_ex"); port_input_arr_step_ = n_var_; port_input_port_step_ = 1; den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay"); - + return 0; } @@ -179,15 +179,15 @@ int user_m1::Update(long long it, double t1) user_m1_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); // gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m1::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m1_iaf_psc_exp.h b/src/user_m1_iaf_psc_exp.h index 994fcf153..fe1a120b7 100644 --- a/src/user_m1_iaf_psc_exp.h +++ b/src/user_m1_iaf_psc_exp.h @@ -73,7 +73,7 @@ enum ScalParamIndexes { N_SCAL_PARAM }; - + const std::string user_m1_scal_var_name[N_SCAL_VAR] = { "I_syn_ex", "I_syn_in", @@ -104,17 +104,17 @@ const std::string user_m1_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m1 : public BaseNeuron { public: ~user_m1(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double, float time_resolution); - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m1_iaf_psc_exp_g.cu b/src/user_m1_iaf_psc_exp_g.cu index 2a56e9c79..d837d5a4f 100644 --- a/src/user_m1_iaf_psc_exp_g.cu +++ b/src/user_m1_iaf_psc_exp_g.cu @@ -56,7 +56,7 @@ __global__ void user_m1_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -66,12 +66,12 @@ __global__ void user_m1_Update } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -114,7 +114,7 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -144,7 +144,7 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -167,15 +167,15 @@ 
int user_m1::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, Theta_rel_, V_reset_rel_, n_refractory_steps, P11, P22, P21, P20 ); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m1::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/user_m1_iaf_psc_exp_g.h b/src/user_m1_iaf_psc_exp_g.h index 96d7686f9..224f7f2c8 100644 --- a/src/user_m1_iaf_psc_exp_g.h +++ b/src/user_m1_iaf_psc_exp_g.h @@ -66,7 +66,7 @@ enum GroupParamIndexes { }; - + const std::string user_m1_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m_rel", @@ -86,9 +86,9 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class user_m1 : public BaseNeuron public: ~user_m1(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m1_iaf_psc_exp_hc.cu b/src/user_m1_iaf_psc_exp_hc.cu index f0ca56a9a..3f466419d 100644 --- a/src/user_m1_iaf_psc_exp_hc.cu +++ b/src/user_m1_iaf_psc_exp_hc.cu @@ -49,7 +49,7 @@ __global__ void user_m1_hc_Update(int n_node, int i_node_0, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -59,12 +59,12 @@ __global__ void user_m1_hc_Update(int n_node, int i_node_0, } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -84,7 +84,7 @@ int user_m1_hc::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -104,7 +104,7 @@ int user_m1_hc::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -119,14 +119,14 @@ int user_m1_hc::Update(long long it, double t1) user_m1_hc_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m1_hc::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m1_iaf_psc_exp_hc.h b/src/user_m1_iaf_psc_exp_hc.h index 1dddb8a80..b0e27a87c 100644 --- a/src/user_m1_iaf_psc_exp_hc.h +++ b/src/user_m1_iaf_psc_exp_hc.h @@ -64,13 +64,13 @@ const std::string user_m1_hc_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m1_hc : public BaseNeuron { public: ~user_m1_hc(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); diff --git a/src/user_m1_kernel.h b/src/user_m1_kernel.h index 377d2147c..1d4a1846b 100644 --- a/src/user_m1_kernel.h +++ b/src/user_m1_kernel.h @@ -115,7 +115,7 @@ const std::string user_m1_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; const std::string user_m1_group_param_name[N_GROUP_PARAM] = { @@ -161,7 +161,7 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ 
group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m1_kernel.h.bk b/src/user_m1_kernel.h.bk index 4626cb91d..f34ee5ac1 100644 --- a/src/user_m1_kernel.h.bk +++ b/src/user_m1_kernel.h.bk @@ -71,7 +71,7 @@ enum GroupParamIndexes { N_GROUP_PARAM }; - + const std::string user_m1_scal_var_name[N_SCAL_VAR] = { "V_m", "w" @@ -103,7 +103,7 @@ const std::string user_m1_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; diff --git a/src/user_m1_psc_alpha.cu b/src/user_m1_psc_alpha.cu index 719f8f8b3..f5a939ed3 100644 --- a/src/user_m1_psc_alpha.cu +++ b/src/user_m1_psc_alpha.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0.0; refractory_step = 0; @@ -136,7 +136,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, + GetPortParamIdx("I0"); port_weight_arr_step_ = n_param_; port_weight_port_step_ = n_port_param_; - + port_input_arr_ = GetVarArr() + n_scal_var_ + GetPortVarIdx("I1_syn"); port_input_arr_step_ = n_var_; @@ -151,7 +151,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1_psc_alpha.h b/src/user_m1_psc_alpha.h index 26753e4f3..871a1b375 100644 --- a/src/user_m1_psc_alpha.h +++ b/src/user_m1_psc_alpha.h @@ -49,22 +49,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1_psc_alpha_kernel.h b/src/user_m1_psc_alpha_kernel.h index e866044ab..50971d2e8 100644 --- a/src/user_m1_psc_alpha_kernel.h +++ b/src/user_m1_psc_alpha_kernel.h @@ -163,7 +163,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/user_m1_psc_delta.cu b/src/user_m1_psc_delta.cu index 583d2a5d7..4d0889a8b 100644 --- a/src/user_m1_psc_delta.cu +++ b/src/user_m1_psc_delta.cu @@ -53,7 +53,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -104,7 +104,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m1_scal_var_name; scal_param_name_ = user_m1_scal_param_name; group_param_name_ = user_m1_group_param_name; @@ -114,7 +114,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -140,13 +140,13 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } int user_m1::Update(long long it, double t1) { rk5_.Update<N_SCAL_VAR, N_SCAL_PARAM>(t1, h_min_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1_psc_delta.h b/src/user_m1_psc_delta.h index 7495ac3f7..25876977e 100644 --- a/src/user_m1_psc_delta.h +++ b/src/user_m1_psc_delta.h @@ -49,22 +49,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + }; #endif diff --git a/src/user_m1_psc_delta_kernel.h b/src/user_m1_psc_delta_kernel.h index 212c79a1d..26fce3420 100644 --- a/src/user_m1_psc_delta_kernel.h +++ b/src/user_m1_psc_delta_kernel.h @@ -131,13 +131,13 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, user_m1_rk5 data_struct) { - + float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak); float V_spike = Delta_T == 0. ? 0. 
: Delta_T*exp((V - V_th)/Delta_T); diff --git a/src/user_m1_psc_exp.cu b/src/user_m1_psc_exp.cu index f62ffc914..a7602d447 100644 --- a/src/user_m1_psc_exp.cu +++ b/src/user_m1_psc_exp.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -111,7 +111,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m1_scal_var_name; port_var_name_= user_m1_port_var_name; scal_param_name_ = user_m1_scal_param_name; @@ -123,7 +123,7 @@ int user_m1::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -150,7 +150,7 @@ int user_m1::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m1_psc_exp.h b/src/user_m1_psc_exp.h index f228ad9e0..0770db736 100644 --- a/src/user_m1_psc_exp.h +++ b/src/user_m1_psc_exp.h @@ -49,22 +49,22 @@ class user_m1 : public BaseNeuron float h_min_; float h_; user_m1_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m1_psc_exp_g.cu b/src/user_m1_psc_exp_g.cu index 01a460060..143515c4f 100644 --- a/src/user_m1_psc_exp_g.cu +++ b/src/user_m1_psc_exp_g.cu @@ -56,7 +56,7 @@ __global__ void user_m1_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -66,12 +66,12 @@ __global__ void user_m1_Update } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -114,7 +114,7 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -144,7 +144,7 @@ int user_m1::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -167,15 +167,15 @@ int user_m1::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, Theta_rel_, V_reset_rel_, n_refractory_steps, P11, P22, P21, P20 ); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m1::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/user_m1_psc_exp_g.h b/src/user_m1_psc_exp_g.h index e01eefb03..c9d4a5f2c 
100644 --- a/src/user_m1_psc_exp_g.h +++ b/src/user_m1_psc_exp_g.h @@ -66,7 +66,7 @@ enum GroupParamIndexes { }; - + const std::string user_m1_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m_rel", @@ -86,9 +86,9 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class user_m1 : public BaseNeuron public: ~user_m1(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m1_psc_exp_hc.cu b/src/user_m1_psc_exp_hc.cu index 15e7624d6..38ae02d59 100644 --- a/src/user_m1_psc_exp_hc.cu +++ b/src/user_m1_psc_exp_hc.cu @@ -49,7 +49,7 @@ __global__ void user_m1_hc_Update(int n_node, int i_node_0, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -59,12 +59,12 @@ __global__ void user_m1_hc_Update(int n_node, int i_node_0, } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -84,7 +84,7 @@ int user_m1_hc::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -104,7 +104,7 @@ int user_m1_hc::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -119,14 +119,14 @@ int user_m1_hc::Update(long long it, double t1) user_m1_hc_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m1_hc::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m1_psc_exp_hc.h b/src/user_m1_psc_exp_hc.h index bc2974cfc..60bb3286f 100644 --- a/src/user_m1_psc_exp_hc.h +++ b/src/user_m1_psc_exp_hc.h @@ -64,13 +64,13 @@ const std::string user_m1_hc_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m1_hc : public BaseNeuron { public: ~user_m1_hc(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); diff --git a/src/user_m1_psc_exp_kernel.h b/src/user_m1_psc_exp_kernel.h index 7133ff8c4..3ab1afc8e 100644 --- a/src/user_m1_psc_exp_kernel.h +++ b/src/user_m1_psc_exp_kernel.h @@ -147,7 +147,7 @@ const std::string user_m1_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, @@ -155,7 +155,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/user_m2.cu b/src/user_m2.cu index accb9bd9e..154483823 100644 --- a/src/user_m2.cu +++ b/src/user_m2.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -99,7 +99,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m2_rk5 data_struct) @@ -143,7 +143,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -167,7 +167,7 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2.h b/src/user_m2.h index 7efc647c0..49741a22a 100644 --- a/src/user_m2.h +++ b/src/user_m2.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m2_cond_alpha.cu b/src/user_m2_cond_alpha.cu index b88aaf1a9..58d3ee7e3 100644 --- a/src/user_m2_cond_alpha.cu +++ b/src/user_m2_cond_alpha.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -81,7 +81,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m2_rk5 data_struct) @@ -113,7 +113,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m2_scal_var_name; port_var_name_= user_m2_port_var_name; scal_param_name_ = user_m2_scal_param_name; @@ -125,7 +125,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -149,7 +149,7 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2_cond_alpha.h b/src/user_m2_cond_alpha.h index d727b12da..09706d057 100644 --- a/src/user_m2_cond_alpha.h +++ b/src/user_m2_cond_alpha.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + 
int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m2_cond_alpha_kernel.h b/src/user_m2_cond_alpha_kernel.h index 67163e4bd..99f8c9b63 100644 --- a/src/user_m2_cond_alpha_kernel.h +++ b/src/user_m2_cond_alpha_kernel.h @@ -112,7 +112,7 @@ const std::string user_m2_scal_param_name[N_SCAL_PARAM] = { const std::string user_m2_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_syn", - "g0" + "g0" }; const std::string user_m2_group_param_name[N_GROUP_PARAM] = { @@ -157,7 +157,7 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m2_cond_beta.cu b/src/user_m2_cond_beta.cu index 61a4cd390..c0619a304 100644 --- a/src/user_m2_cond_beta.cu +++ b/src/user_m2_cond_beta.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -99,7 +99,7 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y, } } - + __device__ void NodeInit(int n_var, int n_param, double x, float *y, float *param, user_m2_rk5 data_struct) @@ -143,7 +143,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -167,7 +167,7 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2_cond_beta.h b/src/user_m2_cond_beta.h index 32ff7723b..f6fccfc55 100644 --- a/src/user_m2_cond_beta.h +++ b/src/user_m2_cond_beta.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m2_cond_beta_kernel.h b/src/user_m2_cond_beta_kernel.h index 914adb95c..a13b13fe2 100644 --- a/src/user_m2_cond_beta_kernel.h +++ b/src/user_m2_cond_beta_kernel.h @@ -115,7 +115,7 @@ const std::string user_m2_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; const std::string user_m2_group_param_name[N_GROUP_PARAM] = { @@ -161,7 +161,7 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m2_iaf_psc_exp.cu b/src/user_m2_iaf_psc_exp.cu index d09af4694..ff83ce992 100644 --- 
a/src/user_m2_iaf_psc_exp.cu +++ b/src/user_m2_iaf_psc_exp.cu @@ -72,12 +72,12 @@ __global__ void user_m2_Calibrate(int n_node, float *param_arr, int i_neuron = threadIdx.x + blockIdx.x * blockDim.x; if (i_neuron<n_node) { float *param = param_arr + n_param*i_neuron; - + P11ex = exp( -h / tau_ex ); P11in = exp( -h / tau_in ); P22 = exp( -h / tau_m ); P21ex = (float)propagator_32( tau_ex, tau_m, C_m, h ); - P21in = (float)propagator_32( tau_in, tau_m, C_m, h ); + P21in = (float)propagator_32( tau_in, tau_m, C_m, h ); P20 = tau_m / C_m * ( 1.0 - P22 ); } } @@ -90,7 +90,7 @@ __global__ void user_m2_Update(int n_node, int i_node_0, float *var_arr, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -101,12 +101,12 @@ __global__ void user_m2_Update(int n_node, int i_node_0, float *var_arr, // exponential decaying PSCs I_syn_ex *= P11ex; I_syn_in *= P11in; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = (int)round(t_ref/NESTGPUTimeResolution); - } + } } } @@ -126,7 +126,7 @@ int user_m2::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -164,14 +164,14 @@ int user_m2::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn_ex, I_syn_in port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn_ex"); port_input_arr_step_ = n_var_; port_input_port_step_ = 1; den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay"); - + return 0; } @@ -181,15 +181,15 @@ int user_m2::Update(long long it, double t1) user_m2_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); // gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m2::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m2_iaf_psc_exp.h b/src/user_m2_iaf_psc_exp.h index e29462d9d..785257832 100644 --- a/src/user_m2_iaf_psc_exp.h +++ b/src/user_m2_iaf_psc_exp.h @@ -73,7 +73,7 @@ enum ScalParamIndexes { N_SCAL_PARAM }; - + const std::string user_m2_scal_var_name[N_SCAL_VAR] = { "I_syn_ex", "I_syn_in", @@ -104,17 +104,17 @@ const std::string user_m2_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m2 : public BaseNeuron { public: ~user_m2(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double, float time_resolution); - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m2_iaf_psc_exp_g.cu b/src/user_m2_iaf_psc_exp_g.cu index 22a7e3393..ef4656a1d 100644 --- a/src/user_m2_iaf_psc_exp_g.cu +++ b/src/user_m2_iaf_psc_exp_g.cu @@ -56,7 +56,7 @@ __global__ void user_m2_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -66,12 +66,12 @@ __global__ void user_m2_Update } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -114,7 +114,7 @@ int user_m2::Init(int i_node_0, int n_node, int 
/*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -144,7 +144,7 @@ int user_m2::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -167,15 +167,15 @@ int user_m2::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, Theta_rel_, V_reset_rel_, n_refractory_steps, P11, P22, P21, P20 ); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m2::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/user_m2_iaf_psc_exp_g.h b/src/user_m2_iaf_psc_exp_g.h index 00eb07ba0..96612872b 100644 --- a/src/user_m2_iaf_psc_exp_g.h +++ b/src/user_m2_iaf_psc_exp_g.h @@ -66,7 +66,7 @@ enum GroupParamIndexes { }; - + const std::string user_m2_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m_rel", @@ -86,9 +86,9 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class user_m2 : public BaseNeuron public: ~user_m2(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m2_iaf_psc_exp_hc.cu b/src/user_m2_iaf_psc_exp_hc.cu index 4b20ebb0a..dd9f62448 100644 --- a/src/user_m2_iaf_psc_exp_hc.cu +++ b/src/user_m2_iaf_psc_exp_hc.cu @@ -49,7 +49,7 @@ __global__ void user_m2_hc_Update(int n_node, int i_node_0, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -59,12 +59,12 @@ __global__ void user_m2_hc_Update(int n_node, int i_node_0, } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -84,7 +84,7 @@ int user_m2_hc::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -104,7 +104,7 @@ int user_m2_hc::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -119,14 +119,14 @@ int user_m2_hc::Update(long long it, double t1) user_m2_hc_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m2_hc::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m2_iaf_psc_exp_hc.h b/src/user_m2_iaf_psc_exp_hc.h index c3307c56b..83a84fc8a 100644 --- a/src/user_m2_iaf_psc_exp_hc.h +++ b/src/user_m2_iaf_psc_exp_hc.h @@ -64,13 +64,13 @@ const std::string user_m2_hc_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m2_hc : public BaseNeuron { public: ~user_m2_hc(); - + int Init(int i_node_0, int n_neuron, int 
n_port, int i_group, unsigned long long *seed); diff --git a/src/user_m2_kernel.h b/src/user_m2_kernel.h index 4648b3a84..0349c9bc1 100644 --- a/src/user_m2_kernel.h +++ b/src/user_m2_kernel.h @@ -115,7 +115,7 @@ const std::string user_m2_port_param_name[N_PORT_PARAM] = { "E_rev", "tau_rise", "tau_decay", - "g0" + "g0" }; const std::string user_m2_group_param_name[N_GROUP_PARAM] = { @@ -161,7 +161,7 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, diff --git a/src/user_m2_psc_alpha.cu b/src/user_m2_psc_alpha.cu index 54ce67729..73f7a7e10 100644 --- a/src/user_m2_psc_alpha.cu +++ b/src/user_m2_psc_alpha.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0.0; refractory_step = 0; @@ -136,7 +136,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, + GetPortParamIdx("I0"); port_weight_arr_step_ = n_param_; port_weight_port_step_ = n_port_param_; - + port_input_arr_ = GetVarArr() + n_scal_var_ + GetPortVarIdx("I1_syn"); port_input_arr_step_ = n_var_; @@ -151,7 +151,7 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2_psc_alpha.h b/src/user_m2_psc_alpha.h index be345a990..dd7b4a303 100644 --- a/src/user_m2_psc_alpha.h +++ b/src/user_m2_psc_alpha.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m2_psc_alpha_kernel.h b/src/user_m2_psc_alpha_kernel.h index a0a1cc129..65d4ce87b 100644 --- a/src/user_m2_psc_alpha_kernel.h +++ b/src/user_m2_psc_alpha_kernel.h @@ -163,7 +163,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/user_m2_psc_delta.cu b/src/user_m2_psc_delta.cu index c7d318d37..ecae092c3 100644 --- a/src/user_m2_psc_delta.cu +++ b/src/user_m2_psc_delta.cu @@ -53,7 +53,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -104,7 +104,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m2_scal_var_name; scal_param_name_ = user_m2_scal_param_name; group_param_name_ = user_m2_group_param_name; @@ -114,7 +114,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -140,13 +140,13 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } int user_m2::Update(long long it, double t1) { rk5_.Update<N_SCAL_VAR, N_SCAL_PARAM>(t1, h_min_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2_psc_delta.h b/src/user_m2_psc_delta.h index ffa95e367..5581a4388 100644 --- a/src/user_m2_psc_delta.h +++ b/src/user_m2_psc_delta.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + }; #endif diff --git a/src/user_m2_psc_delta_kernel.h b/src/user_m2_psc_delta_kernel.h index 4b505c0fc..13a4edc72 100644 --- a/src/user_m2_psc_delta_kernel.h +++ b/src/user_m2_psc_delta_kernel.h @@ -131,13 +131,13 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, user_m2_rk5 data_struct) { - + float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak); float V_spike = Delta_T == 0. ? 0. 
: Delta_T*exp((V - V_th)/Delta_T); diff --git a/src/user_m2_psc_exp.cu b/src/user_m2_psc_exp.cu index a0e99d306..90e822152 100644 --- a/src/user_m2_psc_exp.cu +++ b/src/user_m2_psc_exp.cu @@ -54,7 +54,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param, V_reset = -60.0; t_ref = 0.0; den_delay = 0.0; - + V_m = E_L; w = 0; refractory_step = 0; @@ -111,7 +111,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, n_param_ = n_scal_param_ + n_port_param_*n_port; group_param_ = new float[N_GROUP_PARAM]; - + scal_var_name_ = user_m2_scal_var_name; port_var_name_= user_m2_port_var_name; scal_param_name_ = user_m2_scal_param_name; @@ -123,7 +123,7 @@ int user_m2::Init(int i_node_0, int n_node, int n_port, SetGroupParam("h_min_rel", 1.0e-3); SetGroupParam("h0_rel", 1.0e-2); h_ = h0_rel_* 0.1; - + rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_); var_arr_ = rk5_.GetYArr(); param_arr_ = rk5_.GetParamArr(); @@ -150,7 +150,7 @@ int user_m2::Calibrate(double time_min, float time_resolution) h_min_ = h_min_rel_* time_resolution; h_ = h0_rel_* time_resolution; rk5_.Calibrate(time_min, h_, rk5_data_struct_); - + return 0; } diff --git a/src/user_m2_psc_exp.h b/src/user_m2_psc_exp.h index f7add6d19..c2ecb0580 100644 --- a/src/user_m2_psc_exp.h +++ b/src/user_m2_psc_exp.h @@ -49,22 +49,22 @@ class user_m2 : public BaseNeuron float h_min_; float h_; user_m2_rk5 rk5_data_struct_; - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double time_min, float time_resolution); - + int Update(long long it, double t1); - + int GetX(int i_neuron, int n_node, double *x) { return rk5_.GetX(i_neuron, n_node, x); } - + int GetY(int i_var, int i_neuron, int n_node, float *y) { return rk5_.GetY(i_var, i_neuron, n_node, y); } - + template<int N_PORT> int UpdateNR(long long it, double t1); diff --git a/src/user_m2_psc_exp_g.cu b/src/user_m2_psc_exp_g.cu index e776615fc..867512b92 100644 --- a/src/user_m2_psc_exp_g.cu +++ b/src/user_m2_psc_exp_g.cu @@ -56,7 +56,7 @@ __global__ void user_m2_Update if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -66,12 +66,12 @@ __global__ void user_m2_Update } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -114,7 +114,7 @@ int user_m2::Init(int i_node_0, int n_node, int /*n_port*/, n_scal_param_ = N_SCAL_PARAM; n_group_param_ = N_GROUP_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); group_param_ = new float[N_GROUP_PARAM]; @@ -144,7 +144,7 @@ int user_m2::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -167,15 +167,15 @@ int user_m2::Update(long long it, double t1) (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_, Theta_rel_, V_reset_rel_, n_refractory_steps, P11, P22, P21, P20 ); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m2::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); delete[] group_param_; - + return 0; } diff --git a/src/user_m2_psc_exp_g.h b/src/user_m2_psc_exp_g.h index b20d08894..396bb8a70 
100644 --- a/src/user_m2_psc_exp_g.h +++ b/src/user_m2_psc_exp_g.h @@ -66,7 +66,7 @@ enum GroupParamIndexes { }; - + const std::string user_m2_scal_var_name[N_SCAL_VAR] = { "I_syn", "V_m_rel", @@ -86,9 +86,9 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { "tau_syn", "t_ref" }; - + } // namespace - + @@ -98,14 +98,14 @@ class user_m2 : public BaseNeuron public: ~user_m2(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); int Calibrate(double /*time_min*/, float time_res) { time_resolution_ = time_res; return 0; } - + int Update(long long it, double t1); int Free(); diff --git a/src/user_m2_psc_exp_hc.cu b/src/user_m2_psc_exp_hc.cu index 7ea75ba1e..334790ac6 100644 --- a/src/user_m2_psc_exp_hc.cu +++ b/src/user_m2_psc_exp_hc.cu @@ -49,7 +49,7 @@ __global__ void user_m2_hc_Update(int n_node, int i_node_0, if (i_neuron<n_node) { float *var = var_arr + n_var*i_neuron; float *param = param_arr + n_param*i_neuron; - + if ( refractory_step > 0.0 ) { // neuron is absolute refractory refractory_step -= 1.0; @@ -59,12 +59,12 @@ __global__ void user_m2_hc_Update(int n_node, int i_node_0, } // exponential decaying PSC I_syn *= P11; - + if (V_m_rel >= Theta_rel ) { // threshold crossing PushSpike(i_node_0 + i_neuron, 1.0); V_m_rel = V_reset_rel; refractory_step = n_refractory_steps; - } + } } } @@ -84,7 +84,7 @@ int user_m2_hc::Init(int i_node_0, int n_node, int /*n_port*/, n_var_ = n_scal_var_; n_scal_param_ = N_SCAL_PARAM; n_param_ = n_scal_param_; - + AllocParamArr(); AllocVarArr(); @@ -104,7 +104,7 @@ int user_m2_hc::Init(int i_node_0, int n_node, int /*n_port*/, sizeof(float), cudaMemcpyHostToDevice)); port_weight_arr_step_ = 0; port_weight_port_step_ = 0; - + // input spike signal is stored in I_syn port_input_arr_ = GetVarArr() + GetScalVarIdx("I_syn"); port_input_arr_step_ = n_var_; @@ -119,14 +119,14 @@ int user_m2_hc::Update(long long it, double t1) user_m2_hc_Update<<<(n_node_+1023)/1024, 1024>>> (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_); //gpuErrchk( cudaDeviceSynchronize() ); - + return 0; } int user_m2_hc::Free() { - FreeVarArr(); + FreeVarArr(); FreeParamArr(); - + return 0; } diff --git a/src/user_m2_psc_exp_hc.h b/src/user_m2_psc_exp_hc.h index 12886aa23..38090637d 100644 --- a/src/user_m2_psc_exp_hc.h +++ b/src/user_m2_psc_exp_hc.h @@ -64,13 +64,13 @@ const std::string user_m2_hc_scal_param_name[N_SCAL_PARAM] = { }; } // namespace - + class user_m2_hc : public BaseNeuron { public: ~user_m2_hc(); - + int Init(int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long *seed); diff --git a/src/user_m2_psc_exp_kernel.h b/src/user_m2_psc_exp_kernel.h index 55fc7356a..dccb0616f 100644 --- a/src/user_m2_psc_exp_kernel.h +++ b/src/user_m2_psc_exp_kernel.h @@ -147,7 +147,7 @@ const std::string user_m2_group_param_name[N_GROUP_PARAM] = { #define h_min_rel_ group_param_[i_h_min_rel] #define h0_rel_ group_param_[i_h0_rel] - + template<int NVAR, int NPARAM> //, class DataStruct> __device__ void Derivatives(double x, float *y, float *dydx, float *param, @@ -155,7 +155,7 @@ __device__ { enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR }; float I_syn_tot = 0.0; - + float V = ( refractory_step > 0 ) ? 
V_reset : MIN(V_m, V_peak); for (int i = 0; i<n_port; i++) { diff --git a/src/write_iaf_psc_exp_hc_params.py b/src/write_iaf_psc_exp_hc_params.py index f6417fdfc..7b67e364b 100644 --- a/src/write_iaf_psc_exp_hc_params.py +++ b/src/write_iaf_psc_exp_hc_params.py @@ -4,40 +4,39 @@ E_L = -65.0 Theta_rel = 15.0 V_reset_rel = 0.0 -tau_syn = 0.5 +tau_syn = 0.5 t_ref = 2.0 import numpy as np + def propagator_32(tau_syn, tau, C, h): - P32_linear = 1.0 / ( 2.0 * C * tau * tau ) * h * h \ - * ( tau_syn - tau ) * np.exp( -h / tau ) - P32_singular = h / C * np.exp( -h / tau ) - P32 = -tau / ( C * ( 1 - tau / tau_syn ) ) * np.exp( -h / tau_syn ) \ - * np.expm1( h * ( 1 / tau_syn - 1 / tau ) ) - - dev_P32 = abs( P32 - P32_singular ) - - if ( tau == tau_syn or ( abs( tau - tau_syn ) < 0.1 and dev_P32 > 2.0 - * abs( P32_linear ) ) ): + P32_linear = 1.0 / (2.0 * C * tau * tau) * h * h * (tau_syn - tau) * np.exp(-h / tau) + P32_singular = h / C * np.exp(-h / tau) + P32 = -tau / (C * (1 - tau / tau_syn)) * np.exp(-h / tau_syn) * np.expm1(h * (1 / tau_syn - 1 / tau)) + + dev_P32 = abs(P32 - P32_singular) + + if tau == tau_syn or (abs(tau - tau_syn) < 0.1 and dev_P32 > 2.0 * abs(P32_linear)): return P32_singular else: return P32 + h = time_res -P11 = np.exp( -h / tau_syn ) -P22 = np.exp( -h / tau_m ) -P21 = propagator_32( tau_syn, tau_m, C_m, h ) -P20 = tau_m / C_m * ( 1.0 - P22 ) - -n_refractory_steps = int(round(t_ref/time_res)) - -with open('iaf_psc_exp_hc_params.h', 'w') as p_file: - p_file.write('#define P11 ' + '{:.7E}'.format(P11) + '\n') - p_file.write('#define P22 ' + '{:.7E}'.format(P22) + '\n') - p_file.write('#define P21 ' + '{:.7E}'.format(P21) + '\n') - p_file.write('#define P20 ' + '{:.7E}'.format(P20) + '\n') - p_file.write('#define Theta_rel ' + '{:.7E}'.format(Theta_rel) + '\n') - p_file.write('#define V_reset_rel ' + '{:.7E}'.format(V_reset_rel) + '\n') - p_file.write('#define n_refractory_steps ' + str(n_refractory_steps) + '\n') +P11 = np.exp(-h / tau_syn) +P22 = np.exp(-h / tau_m) +P21 = propagator_32(tau_syn, tau_m, C_m, h) +P20 = tau_m / C_m * (1.0 - P22) + +n_refractory_steps = int(round(t_ref / time_res)) + +with open("iaf_psc_exp_hc_params.h", "w") as p_file: + p_file.write("#define P11 " + "{:.7E}".format(P11) + "\n") + p_file.write("#define P22 " + "{:.7E}".format(P22) + "\n") + p_file.write("#define P21 " + "{:.7E}".format(P21) + "\n") + p_file.write("#define P20 " + "{:.7E}".format(P20) + "\n") + p_file.write("#define Theta_rel " + "{:.7E}".format(Theta_rel) + "\n") + p_file.write("#define V_reset_rel " + "{:.7E}".format(V_reset_rel) + "\n") + p_file.write("#define n_refractory_steps " + str(n_refractory_steps) + "\n") diff --git a/todo.txt b/todo.txt index ac122294b..41ac7ae0c 100644 --- a/todo.txt +++ b/todo.txt @@ -7,4 +7,3 @@ implement new tests and improve existing ones implement other neuron models Make rk5 dynamic? -
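
Note (editor): the last hunk above reformats src/write_iaf_psc_exp_hc_params.py, the helper that precomputes the exact-integration propagators written into iaf_psc_exp_hc_params.h. A minimal, self-contained sketch of that computation follows. The values of time_res, tau_m and C_m are defined near the top of the script and are not visible in the hunk, so the numbers used for them here are illustrative assumptions only; tau_syn, t_ref, Theta_rel and V_reset_rel are taken from the hunk itself.

import numpy as np

# Illustrative assumptions -- the real values are set at the top of the
# script, outside the hunk shown above.
time_res = 0.1   # ms (assumed)
tau_m = 10.0     # ms (assumed)
C_m = 250.0      # pF (assumed)

# Values visible in the diff.
tau_syn = 0.5    # ms
t_ref = 2.0      # ms
Theta_rel = 15.0
V_reset_rel = 0.0

def propagator_32(tau_syn, tau, C, h):
    # Propagator coupling the synaptic current to the membrane potential;
    # switches to the singular expression when tau is numerically close to tau_syn.
    P32_linear = 1.0 / (2.0 * C * tau * tau) * h * h * (tau_syn - tau) * np.exp(-h / tau)
    P32_singular = h / C * np.exp(-h / tau)
    P32 = (-tau / (C * (1 - tau / tau_syn)) * np.exp(-h / tau_syn)
           * np.expm1(h * (1 / tau_syn - 1 / tau)))
    dev_P32 = abs(P32 - P32_singular)
    if tau == tau_syn or (abs(tau - tau_syn) < 0.1 and dev_P32 > 2.0 * abs(P32_linear)):
        return P32_singular
    return P32

h = time_res
P11 = np.exp(-h / tau_syn)                # synaptic-current decay per step
P22 = np.exp(-h / tau_m)                  # membrane-potential decay per step
P21 = propagator_32(tau_syn, tau_m, C_m, h)
P20 = tau_m / C_m * (1.0 - P22)           # propagator for constant input current
n_refractory_steps = int(round(t_ref / time_res))

print(P11, P22, P21, P20, n_refractory_steps)

These are the same constants that the user_m1_Calibrate / user_m2_Calibrate kernels in the hunks above compute per neuron at run time (P11ex, P11in, P22, P21ex, P21in, P20); the _hc variants (presumably "hard-coded") instead rely on the compile-time constants this script writes to iaf_psc_exp_hc_params.h.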
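
Note (editor): for context on how those propagators are used, below is a plain-Python sketch of one update step, following the structure of user_m1_hc_Update / user_m2_hc_Update visible in the hunks above. The refractory countdown, the exponentially decaying PSC and the threshold/reset logic mirror the code that is shown; the subthreshold membrane update itself falls between the hunks, so the propagator form used for it here is an assumption, not the library's verbatim code.

def update_step(V_m_rel, I_syn, refractory_step,
                P11, P20, P21, P22,
                Theta_rel, V_reset_rel, n_refractory_steps, I_e=0.0):
    """One time step of the hard-coded iaf_psc_exp update (sketch only)."""
    spiked = False
    if refractory_step > 0:
        # neuron is in its absolute refractory period
        refractory_step -= 1
    else:
        # Subthreshold exact-integration update. NOT shown in the hunks above;
        # assumed here from the propagators generated by the script.
        V_m_rel = V_m_rel * P22 + I_syn * P21 + I_e * P20
    # exponentially decaying PSC, as in the kernel
    I_syn *= P11
    if V_m_rel >= Theta_rel:
        # threshold crossing: emit a spike, reset, start refractory period
        spiked = True              # the kernel calls PushSpike(...) here
        V_m_rel = V_reset_rel
        refractory_step = n_refractory_steps
    return V_m_rel, I_syn, refractory_step, spiked

In the CUDA kernels this body runs once per neuron per time step, with one GPU thread per neuron; the launch configuration <<<(n_node_+1023)/1024, 1024>>> seen throughout the patch rounds the grid size up so that every neuron is covered.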