Validated multi-chunk simulations (#6)
* dump

* add multi sim and test

* fix merged pyproject

* fix numpy ints messing with NEURON

* fix NeuronPopulation typo and numpy int lookup

* improve arborized model type handling (still open issue)

* add chunked test

* fix multi CM/chunk transmapping + gid on instances

* validate multichunk test

* validate multi CM

* add ci

* fix ci

* use fixed arborize version that works without installing arbor

* bump deps

* avoid neuronsimulator/nrn#2641
Helveg committed Feb 14, 2024
1 parent c4467ad commit b4cae8a
Showing 19 changed files with 467 additions and 173 deletions.
34 changes: 34 additions & 0 deletions .bumpversion.cfg
@@ -0,0 +1,34 @@
[bumpversion]
current_version = 0.0.0b2
files = bsb_neuron/__init__.py
commit = True
tag = True
parse = ^
(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
((?P<prekind>a|alpha|b|beta|d|dev|rc)
(?P<pre>\d+) # pre-release version num
)?
(\.(?P<postkind>post)(?P<post>\d+))? # post-release
serialize =
{major}.{minor}.{patch}{prekind}{pre}.{postkind}{post}
{major}.{minor}.{patch}{prekind}{pre}
{major}.{minor}.{patch}{postkind}{post}
{major}.{minor}.{patch}

[bumpversion:part:prekind]
optional_value = _
values =
_
dev
d
alpha
a
beta
b
rc

[bumpversion:part:postkind]
optional_value = _
values =
_
post
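
For reference, the parse pattern above can be exercised directly with Python's re module (a minimal sketch; verbose-mode regex semantics are assumed to mirror how bumpversion compiles the multi-line pattern):

    import re

    # Same named groups as the .bumpversion.cfg parse setting above.
    pattern = re.compile(
        r"""^
        (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
        ((?P<prekind>a|alpha|b|beta|d|dev|rc)
        (?P<pre>\d+)
        )?
        (\.(?P<postkind>post)(?P<post>\d+))?
        """,
        re.VERBOSE,
    )
    print(pattern.match("0.0.0b2").groupdict())
    # {'major': '0', 'minor': '0', 'patch': '0', 'prekind': 'b', 'pre': '2',
    #  'postkind': None, 'post': None}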
15 changes: 15 additions & 0 deletions .github/workflows/black.yml
@@ -0,0 +1,15 @@
name: Black

on: [push, pull_request]

jobs:
black:
if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: psf/black@stable
with:
options: "--check --verbose"
version: "24.1.1"
20 changes: 20 additions & 0 deletions .github/workflows/isort.yml
@@ -0,0 +1,20 @@
name: Run isort
on:
- push

jobs:
isort:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Install apt dependencies
# Install `libopenmpi` for mpi4py
run: |
sudo apt update
sudo apt install openmpi-bin libopenmpi-dev
# Install dependencies for proper 1st/2nd/3rd party import sorting
- run: pip install -e .[parallel]
- uses: isort/isort-action@master
33 changes: 33 additions & 0 deletions .github/workflows/main.yaml
@@ -0,0 +1,33 @@
name: Test BSB NEURON adapter

on: [push, pull_request]

jobs:
build:
if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11"]
steps:
- uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- name: Install apt dependencies
run: |
sudo apt update
sudo apt install openmpi-bin libopenmpi-dev
- name: Install dependencies & self
run: |
pip install --upgrade pip
# Install self, with test dependencies
pip install .[test,parallel]
- name: Run tests & coverage
run: |
coverage run -p -m unittest discover -v -s ./tests
mpiexec -n 2 coverage run -p -m unittest discover -v -s ./tests
10 changes: 10 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,10 @@
repos:
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.1.1
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
name: isort (python)
3 changes: 2 additions & 1 deletion bsb_neuron/__init__.py
@@ -3,9 +3,10 @@
"""

from bsb.simulation import SimulationBackendPlugin

from . import devices
from .adapter import NeuronAdapter
from .simulation import NeuronSimulation
from . import devices

__version__ = "0.0.0b2"
__plugin__ = SimulationBackendPlugin(Simulation=NeuronSimulation, Adapter=NeuronAdapter)
118 changes: 82 additions & 36 deletions bsb_neuron/adapter.py
@@ -2,29 +2,28 @@
import itertools
import os
import time
import numpy as np
import typing

from neo import AnalogSignal

import numpy as np
from bsb.exceptions import AdapterError, DatasetNotFoundError
from bsb.reporting import report
from bsb.services import MPI
from bsb.simulation.adapter import SimulatorAdapter, SimulationData
from bsb.simulation.adapter import AdapterProgress, SimulationData, SimulatorAdapter
from bsb.simulation.results import SimulationResult
from bsb.storage import Chunk
from neo import AnalogSignal

if typing.TYPE_CHECKING:
from bsb.simulation.simulation import Simulation

from .cell import NeuronCell


class NeuronSimulationData(SimulationData):
def __init__(self, simulation: "Simulation", result=None):
super().__init__(simulation, result=result)
self.cells = dict()
self.cid_offsets = dict()
self.connections = dict()
self.first_gid: int = None


class NeuronResult(SimulationResult):
@@ -103,28 +102,30 @@ def load_balance(self, simulation):
simdata.chunk_node_map[chunk] = node
simdata.chunks = simdata.node_chunk_alloc[MPI.get_rank()]

def run(self, simulation: "Simulation"):
if simulation not in self.simdata:
raise AdapterError("Simulation was not prepared")
def run(self, *simulations: "Simulation"):
unprepared = [sim for sim in simulations if sim not in self.simdata]
if unprepared:
raise AdapterError(f"Unprepared for simulations: {', '.join(unprepared)}")
try:
report("Simulating...", level=2)
pc = self.engine.ParallelContext()
pc.set_maxstep(10)
self.engine.finitialize(self.initial)
simulation.start_progress(simulation.duration)
for oi, i in simulation.step_progress(simulation.duration, 1):
t = time.time()
duration = max(sim.duration for sim in simulations)
progress = AdapterProgress(duration)
for oi, i in progress.steps(step=1):
pc.psolve(i)
simulation.progress(i)
if os.path.exists("interrupt_neuron"):
report("Iterrupt requested. Stopping simulation.", level=1)
break
tick = progress.tick(i)
for listener in self._progress_listeners:
listener(simulations, tick)
progress.complete()
report("Finished simulation.", level=2)
finally:
result = self.simdata[simulation].result
del self.simdata[simulation]
results = [self.simdata[sim].result for sim in simulations]
for sim in simulations:
del self.simdata[sim]

return result
return results

def create_neurons(self, simulation):
simdata = self.simdata[simulation]
@@ -163,45 +164,90 @@ def _allocate_transmitters(self, simulation):
)
self.next_gid += max_trans
simdata.alloc = (first, self.next_gid)
simdata.transmap = self._map_transmitters(simulation, simdata)
simdata.transmap = self._map_transceivers(simulation, simdata)

def _map_transmitters(self, simulation, simdata):
def _map_transceivers(self, simulation, simdata):
blocks = []
offset = 0
transmap = {}

for cm, cs in simulation.get_connectivity_sets().items():
# For each connectivity set, determine how many unique transmitters it will place.
pre, _ = cs.load_connections().as_globals().all()
pre[:, 0] += simdata.cid_offsets[cs.pre_type]
blocks.append(pre[:, :2])
if blocks:
blocks = np.unique(np.concatenate(blocks), axis=0)
return {
tuple(loc): gid + simdata.alloc[0]
for gid, loc in zip(itertools.count(), blocks)
}
all_cm_transmitters = np.unique(pre[:, :2], axis=0)
# Now look up which transmitters are on our chunks
pre_t, _ = cs.load_connections().from_(simdata.chunks).as_globals().all()
our_cm_transmitters = np.unique(pre_t[:, :2], axis=0)
# Look up the local ids of those transmitters
pre_lc, _ = cs.load_connections().from_(simdata.chunks).all()
local_cm_transmitters = np.unique(pre_lc[:, :2], axis=0)

# Find the common indexes between all the transmitters, and the
# transmitters on our chunk.
dtype = ", ".join([str(all_cm_transmitters.dtype)] * 2)
_, _, idx_tm = np.intersect1d(
our_cm_transmitters.view(dtype),
all_cm_transmitters.view(dtype),
assume_unique=True,
return_indices=True,
)

# Look up which transmitters have receivers on our chunks
pre_gc, _ = cs.load_connections().incoming().to(simdata.chunks).all()
local_cm_receivers = np.unique(pre_gc[:, :2], axis=0)
_, _, idx_rcv = np.intersect1d(
local_cm_receivers.view(dtype),
all_cm_transmitters.view(dtype),
assume_unique=True,
return_indices=True,
)

# Store a map of the local chunk transmitters to their GIDs
transmap[cm] = {
"transmitters": dict(
zip(map(tuple, local_cm_transmitters), map(int, idx_tm + offset))
),
"receivers": dict(
zip(map(tuple, local_cm_receivers), map(int, idx_rcv + offset))
),
}
# Offset by the total amount of transmitter GIDs used by this ConnSet.
offset += len(all_cm_transmitters)
return transmap
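
The mapping above leans on one numpy idiom worth isolating: viewing each two-column (cell id, location) array as a structured dtype so that np.intersect1d can match whole rows and report their positions in the full transmitter table. A self-contained sketch of just that idiom, with toy data rather than the adapter's real connection datasets:

    import numpy as np

    # All (cell id, location) transmitter rows, and the subset on "our" chunks.
    all_rows = np.unique(np.array([[0, 1], [0, 2], [1, 1], [2, 0]]), axis=0)
    ours = np.unique(np.array([[2, 0], [0, 2]]), axis=0)
    # View the rows as a structured dtype so intersect1d compares whole pairs.
    dtype = ", ".join([str(all_rows.dtype)] * 2)  # e.g. "int64, int64"
    _, _, idx = np.intersect1d(
        ours.view(dtype), all_rows.view(dtype), assume_unique=True, return_indices=True
    )
    print(idx)  # [1 3]: row [0, 2] is all_rows[1], row [2, 0] is all_rows[3]
    # Adding the per-connectivity-set offset to idx yields globally unique GIDs.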

def _create_population(self, simdata, cell_model, ps, offset):
data = []
for var in ("positions", "morphologies", "rotations", "additional"):
for var in (
"ids",
"positions",
"morphologies",
"rotations",
"additional",
):
try:
data.append(getattr(ps, f"load_{var}")())
except DatasetNotFoundError:
data.append(itertools.repeat(None))

with fill_parameter_data(cell_model.parameters, data):
instances = cell_model.create_instances(len(ps), *data)
simdata.populations[cell_model] = NeuronPopulation(instances)
for id, instance in zip(ps.load_ids(), instances):
cid = offset + id
instance.id = cid
instance.cell_model = cell_model
simdata.cells[cid] = instance
simdata.populations[cell_model] = NeuronPopulation(cell_model, instances)


class NeuronPopulation(list):
def __init__(self, model: "NeuronCell", instances: list):
self._model = model
super().__init__(instances)
for instance in instances:
instance.cell_model = model

def __getitem__(self, item):
# Boolean masking, kind of
if getattr(item, "dtype", None) == bool or _all_bools(item):
return NeuronPopulation(self._model, [p for p, b in zip(self, item) if b])
elif getattr(item, "dtype", None) == int or _all_ints(item):
if getattr(item, "ndim", None) == 0:
return super().__getitem__(item)
return NeuronPopulation(self._model, [self[i] for i in item])
else:
return super().__getitem__(item)
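
A toy usage sketch of that indexing behavior (DummyCell and the model=None argument are illustrative only; numpy arrays are used so the _all_bools/_all_ints helpers hidden below the fold are never hit):

    import numpy as np

    class DummyCell:
        def __init__(self, gid):
            self.gid = gid

    pop = NeuronPopulation(None, [DummyCell(g) for g in range(5)])
    mask = np.array([True, False, True, False, True])
    print([c.gid for c in pop[mask]])  # [0, 2, 4], returned as a new NeuronPopulation
    print([c.gid for c in pop[np.array([1, 3])]])  # [1, 3], fancy integer indexing
    print(pop[np.int64(2)].gid)  # 2; 0-d integers index like plain scalars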
40 changes: 32 additions & 8 deletions bsb_neuron/cell.py
@@ -1,9 +1,10 @@
import itertools
from typing import TYPE_CHECKING

from arborize import ModelDefinition, define_model
from bsb import config
from bsb.config import types
from bsb.config.types import object_
from bsb.simulation.cell import CellModel
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from bsb.morphologies import MorphologySet
@@ -13,7 +14,9 @@
attr_name="model_strategy", required=False, default="arborize", auto_classmap=True
)
class NeuronCell(CellModel):
def create_instances(self, count, pos, morpho: "MorphologySet", rot, additional):
def create_instances(
self, count, ids, pos, morpho: "MorphologySet", rot, additional
):
def dictzip():
yield from (
dict(zip(additional.keys(), values[:-1]))
@@ -22,14 +25,15 @@ def dictzip():
)
)

pos, morpho, rot = (
ids, pos, morpho, rot = (
iter(ids),
iter(pos),
iter(morpho),
iter(rot),
)
additer = dictzip()
return [
self._create(i, next(pos), next(morpho), next(rot), next(additer))
self._create(next(ids), next(pos), next(morpho), next(rot), next(additer))
for i in range(count)
]

@@ -39,14 +43,34 @@ def _create(self, id, pos, morpho, rot, additional):
f"Cell {id} of {self.name} has no morphology, can't use {self.__class__.__name__} to construct it."
)
instance = self.create(id, pos, morpho, rot, additional)
instance._bsb_ref_id = id
instance._bsb_ref_pos = pos
instance.id = id
return instance


class ArborizeModelTypeHandler(object_):
@property
def __name__(self):
return "arborized model definition"

def __call__(self, value):
if isinstance(value, dict):
model = define_model(value)
model._cfg_inv = value
return model
else:
return super().__call__(value)

def __inv__(self, value):
inv_value = super().__inv__(value)
if isinstance(inv_value, ModelDefinition):
# fixme: not good, should at least be converted back to a compatible dict
# definition
return str(inv_value)
return inv_value


@config.node
class ArborizedModel(NeuronCell, classmap_entry="arborize"):
model = config.attr(type=types.object_(), required=True)
model = config.attr(type=ArborizeModelTypeHandler(), required=True)
_schematics = {}

def create(self, id, pos, morpho, rot, additional):
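
To make the new handler's behavior concrete, a hypothetical round-trip (the nested dict layout is illustrative only, not the exact arborize schema):

    handler = ArborizeModelTypeHandler()
    definition = {"cable_types": {"soma": {"cable": {"Ra": 100, "cm": 1}}}}
    model = handler(definition)  # dict -> model via arborize.define_model()
    assert model._cfg_inv == definition  # original dict kept for inverse lookups
    print(handler.__name__)  # "arborized model definition"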