BinaryNetwork.BinaryNetwork

Core binary-network data structures and simulation engine.

The module provides generic neuron populations, synapse generators, and the BinaryNetwork simulation class used by the repository-specific clustered E/I wrapper.

   1# Created by Felix J. Schmitt on 05/30/2023.
   2# Class for binary networks (state is 0 or 1)
   3"""Core binary-network data structures and simulation engine.
   4
   5The module provides generic neuron populations, synapse generators, and the
   6`BinaryNetwork` simulation class used by the repository-specific clustered E/I
   7wrapper.
   8"""
   9
  10from __future__ import annotations
  11
  12import math
  13from typing import List, Optional, Sequence
  14
  15import numpy as np
  16from numba import njit
  17
  18try:  # pragma: no cover - optional dependency
  19    import scipy.sparse as sp
  20except ModuleNotFoundError:  # pragma: no cover - optional dependency
  21    sp = None
  22
# Hide internal base classes from pdoc-generated API documentation.
__pdoc__ = {
    "NetworkElement": False,
    "Neuron": False,
    "Synapse": False,
}

# Explicit public API of this module.
__all__ = [
    "BinaryNeuronPopulation",
    "BackgroundActivity",
    "PairwiseBernoulliSynapse",
    "PoissonSynapse",
    "FixedIndegreeSynapse",
    "AllToAllSynapse",
    "BinaryNetwork",
    "warm_numba_caches",
]

# Process-wide flag: numba kernels are compiled/warmed at most once per process.
_NUMBA_WARMED = False
  41
  42
@njit(cache=True)
def _dense_batch_kernel(
    neurons,
    state,
    field,
    thresholds,
    weights,
    log_states,
    log_enabled,
    log_offset,
    update_log,
    delta_log,
    diff_enabled,
    diff_offset,
):
    """
    Update neurons in-place for dense weight matrices.

    Parameters
    ----------
    neurons : np.ndarray
        Indices that should be updated (int64).
    state : np.ndarray
        Binary neuron state vector (int8).
    field : np.ndarray
        Cached synaptic field h = W @ state (float32).
    thresholds : np.ndarray
        Per-neuron thresholds (float32).
    weights : np.ndarray
        Dense weight matrix (float32).
    log_states : np.ndarray
        Full-state snapshot buffer; row ``log_offset + idx`` is written per
        attempted update when ``log_enabled`` is true.
    log_enabled, log_offset
        Enable flag and row offset for the full-state log.
    update_log, delta_log : np.ndarray
        Diff-log buffers recording the touched neuron index and its state
        delta (0 for no-op updates) when ``diff_enabled`` is true.
    diff_enabled, diff_offset
        Enable flag and element offset for the diff log.
    """
    neuron_count = weights.shape[0]
    for idx in range(neurons.size):
        neuron = neurons[idx]
        old_state = state[neuron]
        potential = field[neuron]
        # Strict threshold crossing: the field must exceed (not merely equal)
        # the threshold for the neuron to be active.
        new_state = 1 if potential > thresholds[neuron] else 0
        delta = 0
        if new_state != old_state:
            delta = new_state - old_state
            state[neuron] = new_state
            # Keep the cached field consistent: add the flipped neuron's
            # outgoing weight column to every target's input.
            for target in range(neuron_count):
                field[target] += delta * weights[target, neuron]
        if diff_enabled:
            # The diff log records every attempted update, including no-ops
            # (delta == 0), so downstream replay can assume one entry per step.
            update_log[diff_offset + idx] = neuron
            delta_log[diff_offset + idx] = delta
        if log_enabled:
            log_states[log_offset + idx, :] = state
  90
  91
@njit(cache=True)
def _sparse_batch_kernel(
    neurons,
    state,
    field,
    thresholds,
    data,
    indices,
    indptr,
    log_states,
    log_enabled,
    log_offset,
    update_log,
    delta_log,
    diff_enabled,
    diff_offset,
):
    """
    Update neurons for sparse CSC weights.

    Parameters
    ----------
    data, indices, indptr : np.ndarray
        CSC column representation (float32, int32, int64/int32).
    log_states, log_enabled, log_offset
        Optional full-state snapshot log (one row per attempted update).
    update_log, delta_log, diff_enabled, diff_offset
        Optional diff log of (neuron, delta) per attempted update; no-op
        updates are recorded with delta == 0.
    """
    for idx in range(neurons.size):
        neuron = neurons[idx]
        old_state = state[neuron]
        potential = field[neuron]
        # Strict threshold: equality keeps the neuron inactive.
        new_state = 1 if potential > thresholds[neuron] else 0
        delta = 0
        if new_state != old_state:
            delta = new_state - old_state
            state[neuron] = new_state
            # CSC column `neuron` holds its outgoing weights; propagate the
            # flip to every postsynaptic target's cached field.
            start = indptr[neuron]
            end = indptr[neuron + 1]
            for ptr in range(start, end):
                row = indices[ptr]
                field[row] += delta * data[ptr]
        if diff_enabled:
            update_log[diff_offset + idx] = neuron
            delta_log[diff_offset + idx] = delta
        if log_enabled:
            log_states[log_offset + idx, :] = state
 135
 136
@njit(cache=True)
def _reconstruct_states_kernel(initial_state, updates, deltas, stride):
    """Replay a diff log and return every ``stride``-th full state snapshot.

    Parameters
    ----------
    initial_state : np.ndarray
        State vector at the start of the logged window (int8).
    updates, deltas : np.ndarray
        (updates_per_step, steps) diff log of touched unit indices and their
        signed state deltas; delta == 0 marks a no-op entry.
    stride : int
        Sampling stride in simulation steps; step 0 is always sampled.
    """
    steps = updates.shape[1]
    if steps == 0:
        return np.zeros((0, initial_state.size), dtype=np.uint8)
    sample_count = ((steps - 1) // stride) + 1
    state = initial_state.copy()
    samples = np.zeros((sample_count, state.size), dtype=np.uint8)
    sample_idx = 0
    for step in range(steps):
        for row in range(updates.shape[0]):
            delta = deltas[row, step]
            if delta == 0:
                continue
            unit = updates[row, step]
            # Clamp to {0, 1}: the log stores signed deltas on a binary state.
            state_value = int(state[unit]) + int(delta)
            state[unit] = 1 if state_value > 0 else 0
        if step % stride == 0:
            samples[sample_idx, :] = state
            sample_idx += 1
    return samples
 158
 159
@njit(cache=True)
def _population_rates_kernel(cluster_sums, sizes, cluster_of, updates, deltas, stride):
    """Replay a diff log and return per-population mean activity every ``stride`` steps.

    Parameters
    ----------
    cluster_sums : np.ndarray
        Number of active units per population at the start of the window.
    sizes : np.ndarray
        Population sizes used to normalize running sums into rates.
    cluster_of : np.ndarray
        Unit index -> population index lookup.
    updates, deltas : np.ndarray
        (updates_per_step, steps) diff log of touched units and state deltas;
        delta == 0 marks a no-op entry.
    stride : int
        Sampling stride in simulation steps; step 0 is always sampled.
    """
    steps = updates.shape[1]
    pop_count = cluster_sums.size
    if steps == 0 or pop_count == 0:
        return np.zeros((0, pop_count), dtype=np.float32)
    sample_count = ((steps - 1) // stride) + 1
    samples = np.zeros((sample_count, pop_count), dtype=np.float32)
    running = cluster_sums.copy()
    sample_idx = 0
    for step in range(steps):
        for row in range(updates.shape[0]):
            delta = deltas[row, step]
            if delta == 0:
                continue
            unit = updates[row, step]
            cluster_idx = cluster_of[unit]
            running[cluster_idx] += int(delta)
        if step % stride == 0:
            # Normalize the running active counts into per-population rates.
            for pop_idx in range(pop_count):
                samples[sample_idx, pop_idx] = running[pop_idx] / sizes[pop_idx]
            sample_idx += 1
    return samples
 183
 184
def warm_numba_caches() -> None:
    """Compile and cache the numba kernels once per process.

    Calls every kernel with minimal, type-correct dummy arguments so numba's
    JIT compilation (and on-disk cache) happens before the first real
    simulation step rather than in the middle of a run.
    """
    global _NUMBA_WARMED
    if _NUMBA_WARMED:
        return
    # Tiny dummy inputs matching each kernel's expected dtypes.
    neurons = np.array([0], dtype=np.int64)
    state = np.zeros(2, dtype=np.int8)
    field = np.zeros(2, dtype=np.float32)
    thresholds = np.zeros(2, dtype=np.float32)
    weights = np.zeros((2, 2), dtype=np.float32)
    log_states = np.zeros((1, 2), dtype=np.int8)
    update_log = np.zeros(1, dtype=np.uint16)
    delta_log = np.zeros(1, dtype=np.int8)
    _dense_batch_kernel(
        neurons,
        state.copy(),  # copies: keep the shared dummies pristine for the next call
        field.copy(),
        thresholds,
        weights,
        log_states,
        False,
        0,
        update_log,
        delta_log,
        False,
        0,
    )
    _sparse_batch_kernel(
        neurons,
        state.copy(),
        field.copy(),
        thresholds,
        np.zeros(0, dtype=np.float32),  # empty CSC: no connections
        np.zeros(0, dtype=np.int32),
        np.array([0, 0], dtype=np.int32),
        log_states,
        False,
        0,
        update_log,
        delta_log,
        False,
        0,
    )
    _reconstruct_states_kernel(
        np.zeros(2, dtype=np.int8),
        np.zeros((1, 1), dtype=np.int64),
        np.zeros((1, 1), dtype=np.int8),
        1,
    )
    _population_rates_kernel(
        np.zeros(1, dtype=np.int64),
        np.ones(1, dtype=np.float32),
        np.zeros(2, dtype=np.int32),
        np.zeros((1, 1), dtype=np.int64),
        np.zeros((1, 1), dtype=np.int8),
        1,
    )
    _NUMBA_WARMED = True
 243
 244
 245def _compress_flat_samples(flat: np.ndarray, total_pairs: int) -> tuple[np.ndarray, np.ndarray]:
 246    flat = np.asarray(flat, dtype=np.int64).ravel()
 247    if flat.size == 0:
 248        return np.zeros(0, dtype=np.int64), np.zeros(0, dtype=np.int64)
 249    if total_pairs <= 2_000_000:
 250        counts = np.bincount(flat, minlength=total_pairs)
 251        unique = np.flatnonzero(counts)
 252        return unique.astype(np.int64, copy=False), counts[unique].astype(np.int64, copy=False)
 253    unique, counts = np.unique(flat, return_counts=True)
 254    return unique.astype(np.int64, copy=False), counts.astype(np.int64, copy=False)
 255
 256
 257class NetworkElement:
 258    """Base class for network elements that occupy a slice in the global state."""
 259    def __init__(self, reference, name="Some Network Element"):
 260        self.name = name
 261        self.reference = reference
 262        self.view = None
 263
 264    def set_view(self, view):
 265        self.view = np.asarray(view, dtype=np.int64)
 266
 267    def initialze(self):
 268        pass
 269
 270
 271class Neuron(NetworkElement):
 272    """Base class for neuron populations stored in contiguous slices."""
 273    def __init__(self, reference, N=1, name="Some Neuron", tau=1.0):
 274        super().__init__(reference, name)
 275        self.N = N
 276        self.state = None
 277        self.tau = tau
 278
 279    def update(self):
 280        pass
 281
 282    def set_view(self, view):
 283        super().set_view(view)
 284        self.state = self.reference.state[self.view[0]:self.view[1]]
 285
 286    def initialze(self):
 287        self.reference.state[self.view[0]:self.view[1]] = self._initial_state()
 288
 289
 290class BinaryNeuronPopulation(Neuron):
 291    """Binary neuron population with threshold activation and configurable initializer.
 292
 293    Examples
 294    --------
 295    >>> net = BinaryNetwork("demo")
 296    >>> pop = BinaryNeuronPopulation(net, N=3, threshold=0.5, initializer=0)
 297    >>> net.add_population(pop)
 298    <...BinaryNeuronPopulation object...>
 299    """
 300    def __init__(
 301        self,
 302        reference,
 303        N=1,
 304        threshold=1.0,
 305        name="Binary Neuron Population",
 306        tau=1.0,
 307        initializer=None,
 308        **kwargs,
 309    ):
 310        super().__init__(reference, N, name, tau=tau)
 311        self.threshold = float(threshold)
 312        self.initializer = initializer
 313
 314    def update(self, weights=None, state=None, index=None, input_value=None, **kwargs):
 315        if input_value is None:
 316            if weights is None or state is None:
 317                raise ValueError("weights and state have to be provided when input_value is not set")
 318            input_value = np.sum(weights * state)
 319        return np.heaviside(input_value - self.threshold, 0)
 320
 321    def _initial_state(self):
 322        if callable(self.initializer):
 323            values = np.asarray(self.initializer(self.N), dtype=np.int16)
 324            if values.size != self.N:
 325                raise ValueError("Initializer must return {} entries".format(self.N))
 326            return values
 327        if self.initializer is None:
 328            return np.random.choice([0, 1], size=self.N, p=[0.8, 0.2])
 329        arr = np.asarray(self.initializer)
 330        if arr.size == 1:
 331            return np.full(self.N, int(arr.item()), dtype=np.int16)
 332        if arr.size != self.N:
 333            raise ValueError("Initializer must define exactly {} elements".format(self.N))
 334        return arr.astype(np.int16)
 335
 336
 337class BackgroundActivity(Neuron):
 338    """Background population that provides a constant or stochastic drive."""
 339    def __init__(self, reference, N=1, Activity=0.5, Stochastic=False, name="Background Activity", tau=1.0):
 340        super().__init__(reference, N, name, tau=tau)
 341        self.Activity = Activity
 342        if Stochastic:
 343            self.update = self.update_stochastic
 344        else:
 345            self.update = self.update_deterministic
 346
 347    def update_stochastic(self, weights=None, state=None, Index=None, **kwargs):
 348        return np.random.choice([0, 1], 1) * self.update_deterministic(weights, state, **kwargs)
 349
 350    def update_deterministic(self, weights=None, state=None, Index=None, **kwargs):
 351        # if activity is a float, set all neurons to this activity
 352        if isinstance(self.Activity, float):
 353            return self.Activity
 354        # if activity is a function, set neurons by this function
 355        elif callable(self.Activity):
 356            return self.Activity()
 357        else:
 358            return 1.0
 359
 360    def initialze(self):
 361        self.state = np.array([self.update() for _ in range(self.N)])
 362
 363
 364class Synapse(NetworkElement):
 365    """Base class for synapse objects that write weight blocks into the network."""
 366    def __init__(self, reference, pre, post, name="Some Synapse"):
 367        super().__init__(reference, name=post.name + " <- " + pre.name)
 368        self.pre = pre
 369        self.post = post
 370
 371    def set_view(self, view):
 372        super().set_view(view)
 373
 374    def _write_block(self, block: np.ndarray):
 375        """Add a dense block to the global weight matrix (dense or sparse backend)."""
 376        post_start, post_end = int(self.view[1, 0]), int(self.view[1, 1])
 377        pre_start, pre_end = int(self.view[0, 0]), int(self.view[0, 1])
 378        self.reference._write_weight_block(post_start, post_end, pre_start, pre_end, block)
 379
 380    def _append_sparse_entries(self, rows: np.ndarray, cols: np.ndarray, values: np.ndarray) -> None:
 381        if self.reference.weight_mode != "sparse":
 382            raise RuntimeError("Sparse entries can only be appended in sparse weight mode.")
 383        if rows.size == 0:
 384            return
 385        post_start = int(self.view[1, 0])
 386        pre_start = int(self.view[0, 0])
 387        self.reference._append_sparse_entries(
 388            np.asarray(rows, dtype=np.int64) + post_start,
 389            np.asarray(cols, dtype=np.int64) + pre_start,
 390            np.asarray(values, dtype=self.reference.weight_dtype),
 391        )
 392
 393    def initialze(self):
 394        raise NotImplementedError
 395
 396
 397class PairwiseBernoulliSynapse(Synapse):
 398    """Sample independent Bernoulli connections for every pre/post pair.
 399
 400    Expected output
 401    ---------------
 402    After `network.initialize(...)`, the addressed weight block contains zeros
 403    and `j`-valued entries sampled independently with probability `p`.
 404    """
 405    def __init__(self, reference, pre, post, p=0.5, j=1.0):
 406        super().__init__(reference, pre, post)
 407        self.p = float(p)
 408        self.j = float(j)
 409
 410    def initialze(self):
 411        p = self.p
 412        iterations = 1
 413        while p > 1:
 414            p /= 2.0
 415            iterations += 1
 416        if self.reference.weight_mode == "sparse":
 417            total_pairs = self.post.N * self.pre.N
 418            if total_pairs == 0 or p <= 0.0:
 419                return
 420            data_chunks: List[np.ndarray] = []
 421            row_chunks: List[np.ndarray] = []
 422            col_chunks: List[np.ndarray] = []
 423            for _ in range(iterations):
 424                sample_count = int(np.random.binomial(total_pairs, p))
 425                if sample_count <= 0:
 426                    continue
 427                if sample_count >= total_pairs:
 428                    flat = np.arange(total_pairs, dtype=np.int64)
 429                else:
 430                    flat = np.random.choice(total_pairs, size=sample_count, replace=False).astype(np.int64, copy=False)
 431                rows = flat // self.pre.N
 432                cols = flat % self.pre.N
 433                row_chunks.append(rows)
 434                col_chunks.append(cols)
 435                data_chunks.append(np.full(sample_count, self.j, dtype=self.reference.weight_dtype))
 436            if data_chunks:
 437                self._append_sparse_entries(
 438                    np.concatenate(row_chunks),
 439                    np.concatenate(col_chunks),
 440                    np.concatenate(data_chunks),
 441                )
 442            return
 443        shape = (self.post.N, self.pre.N)
 444        block = np.zeros(shape, dtype=self.reference.weight_dtype)
 445        for _ in range(iterations):
 446            draws = (np.random.random(size=shape) < p).astype(block.dtype, copy=False)
 447            block += draws * self.j
 448        self._write_block(block)
 449
 450
 451class PoissonSynapse(Synapse):
 452    """Sample Poisson-distributed multi-edge counts per pre/post pair.
 453
 454    Expected output
 455    ---------------
 456    The addressed weight block contains non-negative multiples of `j` with
 457    Poisson-distributed counts.
 458    """
 459    def __init__(self, reference, pre, post, rate=0.5, j=1.0):
 460        super().__init__(reference, pre, post)
 461        self.rate = float(rate)
 462        self.j = float(j)
 463
 464    def initialze(self):
 465        if self.reference.weight_mode == "sparse":
 466            total_pairs = self.post.N * self.pre.N
 467            if total_pairs == 0 or self.rate <= 0.0:
 468                return
 469            sample_count = int(np.random.poisson(lam=self.rate * total_pairs))
 470            if sample_count <= 0:
 471                return
 472            flat = np.random.randint(total_pairs, size=sample_count, dtype=np.int64)
 473            unique, counts = _compress_flat_samples(flat, total_pairs)
 474            rows = unique // self.pre.N
 475            cols = unique % self.pre.N
 476            values = counts.astype(self.reference.weight_dtype, copy=False) * self.j
 477            self._append_sparse_entries(rows, cols, values)
 478            return
 479        shape = (self.post.N, self.pre.N)
 480        samples = np.random.poisson(lam=self.rate, size=shape).astype(self.reference.weight_dtype, copy=False)
 481        self._write_block(samples * self.j)
 482
 483
 484class FixedIndegreeSynapse(Synapse):
 485    """Connect each target neuron to a fixed number of randomly drawn inputs.
 486
 487    Expected output
 488    ---------------
 489    Each target row receives approximately `round(p * N_pre)` incoming entries.
 490    When `multapses` is `True`, presynaptic partners are sampled with replacement.
 491    When `multapses` is `False`, partners are sampled without replacement.
 492    """
 493    def __init__(self, reference, pre, post, p=0.5, j=1.0, multapses=True):
 494        super().__init__(reference, pre, post)
 495        self.p = float(p)
 496        self.j = float(j)
 497        self.multapses = bool(multapses)
 498
 499    def initialze(self):
 500        p = max(self.p, 0.0)
 501        target_count = int(round(p * self.pre.N))
 502        target_count = max(target_count, 0)
 503        if target_count == 0:
 504            return
 505        if (not self.multapses) and target_count > self.pre.N:
 506            raise ValueError(
 507                f"Fixed-indegree sampling without multapses requested indegree {target_count} "
 508                f"from only {self.pre.N} presynaptic neurons."
 509            )
 510        if self.reference.weight_mode == "sparse":
 511            rows = np.repeat(np.arange(self.post.N, dtype=np.int64), target_count)
 512            if self.multapses:
 513                cols = np.random.randint(self.pre.N, size=self.post.N * target_count, dtype=np.int64)
 514            else:
 515                col_chunks = [
 516                    np.random.choice(self.pre.N, size=target_count, replace=False).astype(np.int64, copy=False)
 517                    for _ in range(self.post.N)
 518                ]
 519                cols = np.concatenate(col_chunks) if col_chunks else np.zeros(0, dtype=np.int64)
 520            values = np.full(rows.size, self.j, dtype=self.reference.weight_dtype)
 521            self._append_sparse_entries(rows, cols, values)
 522            return
 523        block = np.zeros((self.post.N, self.pre.N), dtype=self.reference.weight_dtype)
 524        for tgt in range(self.post.N):
 525            pres = np.random.choice(self.pre.N, size=target_count, replace=self.multapses)
 526            np.add.at(block[tgt], pres, self.j)
 527        self._write_block(block)
 528
 529
 530class AllToAllSynapse(Synapse):
 531    """Dense all-to-all block with constant weight value.
 532
 533    Expected output
 534    ---------------
 535    The addressed weight block is filled with the constant value `j`.
 536    """
 537    def __init__(self, reference, pre, post, j=1.0):
 538        super().__init__(reference, pre, post)
 539        self.j = float(j)
 540
 541    def initialze(self):
 542        if self.reference.weight_mode == "sparse":
 543            total_pairs = self.post.N * self.pre.N
 544            if total_pairs == 0:
 545                return
 546            rows = np.repeat(np.arange(self.post.N, dtype=np.int64), self.pre.N)
 547            cols = np.tile(np.arange(self.pre.N, dtype=np.int64), self.post.N)
 548            values = np.full(total_pairs, self.j, dtype=self.reference.weight_dtype)
 549            self._append_sparse_entries(rows, cols, values)
 550            return
 551        block = np.full((self.post.N, self.pre.N), self.j, dtype=self.reference.weight_dtype)
 552        self._write_block(block)
 553
 554
 555class BinaryNetwork:
 556    """Binary network simulator with dense/sparse backends and diff-log tracing.
 557
 558    Examples
 559    --------
 560    >>> np.random.seed(0)
 561    >>> net = BinaryNetwork("demo")
 562    >>> exc = BinaryNeuronPopulation(net, N=2, threshold=0.5, tau=5.0, initializer=[0, 1])
 563    >>> inh = BinaryNeuronPopulation(net, N=1, threshold=0.5, tau=5.0, initializer=[0])
 564    >>> net.add_population(exc)
 565    <...BinaryNeuronPopulation object...>
 566    >>> net.add_population(inh)
 567    <...BinaryNeuronPopulation object...>
 568    >>> net.add_synapse(AllToAllSynapse(net, exc, exc, j=0.6))
 569    >>> net.initialize(weight_mode="dense", autapse=False)
 570    >>> net.state.shape
 571    (3,)
 572    """
    def __init__(self, name="Some Binary Network"):
        """Create an empty network; add populations/synapses, then call initialize()."""
        self.name = name
        self.N = 0  # total neuron count across all populations
        self.population: List[Neuron] = []
        self.synapses: List[Synapse] = []
        self.state: Optional[np.ndarray] = None  # global binary state vector (int8)
        # Weight storage: the active backend is populated by initialize().
        self.weights_dense: Optional[np.ndarray] = None
        self.weights_csr = None
        self.weights_csc = None
        self.weights = None  # compatibility alias
        self.LUT = None  # look up table for the update function
        self.sim_steps = 0
        self.population_lookup = None  # neuron index -> population index
        self.neuron_lookup = None  # neuron index -> index within its population
        self.update_prob = None  # per-neuron update probability (normalized)
        self.thresholds = None  # per-neuron firing thresholds
        self.field = None  # cached synaptic field h = W @ state
        self.weight_mode = "dense"
        self.weight_dtype = np.float32
        # Per-population [start, stop) views and sampling CDF over populations.
        self._population_views = np.zeros((0, 2), dtype=np.int64)
        self._population_cdf = np.zeros(0, dtype=np.float64)
        # Staged COO triplets collected by synapses in sparse mode.
        self._sparse_rows: List[np.ndarray] = []
        self._sparse_cols: List[np.ndarray] = []
        self._sparse_data: List[np.ndarray] = []
        # Optional full-state snapshot log.
        self._step_log_buffer: Optional[np.ndarray] = None
        self._step_log_index = 0
        self._step_log_dummy = np.zeros((0, 0), dtype=np.int8)
        # Optional diff log: (touched unit, state delta) per attempted update.
        self._diff_log_updates: Optional[np.ndarray] = None
        self._diff_log_deltas: Optional[np.ndarray] = None
        self._diff_log_index = 0
        self._diff_log_dummy_updates = np.zeros(0, dtype=np.uint16)
        self._diff_log_dummy_deltas = np.zeros(0, dtype=np.int8)
 605
 606    def add_population(self, population: Neuron):
 607        self.population.append(population)
 608        self.N += population.N
 609        return population
 610
 611    def add_synapse(self, synapse: Synapse):
 612        self.synapses.append(synapse)
 613
    def initialize(
        self,
        autapse: bool = False,
        weight_mode: str = "auto",
        ram_budget_gb: float = 12.0,
        weight_dtype=np.float32,
    ):
        """Allocate state, sample connectivity, and prepare the cached input field.

        Parameters
        ----------
        autapse : bool
            Keep self-connections when True; otherwise the weight diagonal is
            removed.
        weight_mode : str
            'dense', 'sparse', or 'auto' (chosen from the estimated dense
            RAM footprint vs ``ram_budget_gb``).
        ram_budget_gb : float
            RAM budget used by the 'auto' backend decision.
        weight_dtype
            float32 or float64 storage type for weights and fields.

        Raises
        ------
        RuntimeError
            If no populations were added, or tau values yield invalid
            update probabilities.
        ValueError
            If ``weight_dtype`` is not float32/float64.
        ModuleNotFoundError
            If sparse mode is selected but SciPy is unavailable.

        Examples
        --------
        >>> np.random.seed(1)
        >>> net = BinaryNetwork("init-demo")
        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
        >>> net.add_population(pop)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> net.field.shape
        (2,)
        """
        if self.N == 0:
            raise RuntimeError("Cannot initialize network without populations.")
        self.weight_dtype = np.dtype(weight_dtype)
        if self.weight_dtype not in (np.float32, np.float64):
            raise ValueError("weight_dtype must be float32 or float64.")
        # Global per-neuron arrays, indexed by position in the state vector.
        self.state = np.zeros(self.N, dtype=np.int8)
        self.field = np.zeros(self.N, dtype=self.weight_dtype)
        self.update_prob = np.zeros(self.N, dtype=self.weight_dtype)
        self.population_lookup = np.zeros(self.N, dtype=np.int32)
        self.neuron_lookup = np.zeros(self.N, dtype=np.int32)
        self.thresholds = np.zeros(self.N, dtype=self.weight_dtype)
        pop_count = len(self.population)
        pop_views = np.zeros((pop_count, 2), dtype=np.int64)
        pop_update_mass = np.zeros(pop_count, dtype=np.float64)
        N_start = 0
        for idx, population in enumerate(self.population):
            # Assign the contiguous state slice, then let the population fill it.
            population.set_view([N_start, N_start + population.N])
            N_start += population.N
            population.initialze()
            pop_views[idx, :] = population.view
            # Update mass ~ N / tau: faster populations are selected more often.
            pop_update_mass[idx] = population.N / max(population.tau, 1e-9)
            self.update_prob[population.view[0]:population.view[1]] = 1.0 / max(population.tau, 1e-9)
            self.population_lookup[population.view[0]:population.view[1]] = idx
            self.neuron_lookup[population.view[0]:population.view[1]] = np.arange(
                population.N, dtype=np.int32
            )
            # Populations without a threshold attribute (e.g. background) get 0.0.
            threshold_value = getattr(population, "threshold", 0.0)
            self.thresholds[population.view[0]:population.view[1]] = float(threshold_value)
        self.LUT = np.array([population.view for population in self.population])
        total = float(self.update_prob.sum())
        if not math.isfinite(total) or total <= 0:
            raise RuntimeError("Invalid update probabilities. Check tau values.")
        self.update_prob /= total
        pop_mass_total = float(pop_update_mass.sum())
        if not math.isfinite(pop_mass_total) or pop_mass_total <= 0:
            raise RuntimeError("Invalid population update masses. Check tau values.")
        self._population_views = pop_views
        self._population_cdf = np.cumsum(pop_update_mass / pop_mass_total)
        self.weight_mode = self._choose_weight_mode(weight_mode, ram_budget_gb)
        # Drop any previous weight storage before (re-)sampling connectivity.
        self.weights_dense = None
        self.weights_csr = None
        self.weights_csc = None
        self.weights = None
        self._sparse_rows.clear()
        self._sparse_cols.clear()
        self._sparse_data.clear()
        if self.weight_mode == "dense":
            self.weights_dense = np.zeros((self.N, self.N), dtype=self.weight_dtype)
        else:
            if sp is None:
                raise ModuleNotFoundError(
                    "SciPy is required for sparse weight mode. Install it via 'pip install scipy'."
                )
        for synapse in self.synapses:
            # Synapse view layout: row 0 = presynaptic slice, row 1 = postsynaptic.
            synapse.set_view(
                np.array(
                    [[synapse.pre.view[0], synapse.pre.view[1]], [synapse.post.view[0], synapse.post.view[1]]],
                    dtype=np.int64,
                )
            )
            synapse.initialze()
        if self.weight_mode == "dense":
            if not autapse:
                np.fill_diagonal(self.weights_dense, 0.0)
            self.weights = self.weights_dense
        else:
            row = np.concatenate(self._sparse_rows) if self._sparse_rows else np.zeros(0, dtype=np.int64)
            col = np.concatenate(self._sparse_cols) if self._sparse_cols else np.zeros(0, dtype=np.int64)
            data = np.concatenate(self._sparse_data) if self._sparse_data else np.zeros(0, dtype=self.weight_dtype)
            if not autapse and row.size:
                keep = row != col
                row = row[keep]
                col = col[keep]
                data = data[keep]
            # COO construction sums duplicate (row, col) entries, accumulating
            # multapse weights into a single matrix element.
            matrix = sp.coo_matrix((data, (row, col)), shape=(self.N, self.N), dtype=self.weight_dtype)
            self.weights_csr = matrix.tocsr()
            self.weights_csc = matrix.tocsc()
            self.weights = self.weights_csr
        self._recompute_field()
        self.sim_steps = 0
 714
 715    def _recompute_field(self):
 716        state_float = self.state.astype(self.weight_dtype, copy=False)
 717        if self.weight_mode == "dense":
 718            self.field = self.weights_dense @ state_float
 719        else:
 720            self.field = self.weights_csr.dot(state_float).astype(self.weight_dtype, copy=False)
 721
 722    def _write_weight_block(self, row_start, row_end, col_start, col_end, block):
 723        block = np.asarray(block, dtype=self.weight_dtype)
 724        if block.shape != (row_end - row_start, col_end - col_start):
 725            raise ValueError("Block shape does not match target slice.")
 726        if self.weight_mode == "dense":
 727            self.weights_dense[row_start:row_end, col_start:col_end] += block
 728        else:
 729            rows = np.arange(row_start, row_end, dtype=np.int64)
 730            cols = np.arange(col_start, col_end, dtype=np.int64)
 731            row_idx = np.repeat(rows, block.shape[1])
 732            col_idx = np.tile(cols, block.shape[0])
 733            values = block.reshape(-1)
 734            mask = values != 0.0
 735            if mask.any():
 736                self._sparse_rows.append(row_idx[mask])
 737                self._sparse_cols.append(col_idx[mask])
 738                self._sparse_data.append(values[mask])
 739
 740    def _append_sparse_entries(self, row_idx: np.ndarray, col_idx: np.ndarray, values: np.ndarray) -> None:
 741        if self.weight_mode != "sparse":
 742            raise RuntimeError("Sparse entries can only be appended in sparse weight mode.")
 743        rows = np.asarray(row_idx, dtype=np.int64).ravel()
 744        cols = np.asarray(col_idx, dtype=np.int64).ravel()
 745        data = np.asarray(values, dtype=self.weight_dtype).ravel()
 746        if rows.size != cols.size or rows.size != data.size:
 747            raise ValueError("Sparse row/col/data arrays must have the same length.")
 748        if rows.size == 0:
 749            return
 750        mask = data != 0.0
 751        if mask.any():
 752            self._sparse_rows.append(rows[mask])
 753            self._sparse_cols.append(cols[mask])
 754            self._sparse_data.append(data[mask])
 755
 756    def _choose_weight_mode(self, requested: str, ram_budget_gb: float) -> str:
 757        requested = str(requested or "auto").lower()
 758        if requested in {"dense", "sparse"}:
 759            return requested
 760        if requested != "auto":
 761            raise ValueError("weight_mode must be 'dense', 'sparse', or 'auto'.")
 762        # Estimate dense memory footprint assuming float32.
 763        bytes_per_entry = np.dtype(self.weight_dtype).itemsize
 764        dense_bytes = self.N * self.N * bytes_per_entry
 765        dense_gb = dense_bytes / (1024 ** 3)
 766        safety = 0.6 * float(ram_budget_gb)
 767        print("Simulating network as: " + "dense" if dense_gb <= safety else "sparse")
 768        return "dense" if dense_gb <= safety else "sparse"
 769
 770    def _select_neurons(self, count: int) -> np.ndarray:
 771        if count <= 0:
 772            return np.zeros(0, dtype=np.int64)
 773        if self._population_cdf.size == 0:
 774            return np.random.choice(self.N, size=count, p=self.update_prob)
 775        pop_indices = np.searchsorted(self._population_cdf, np.random.random(size=count), side="right")
 776        pop_indices = np.minimum(pop_indices, self._population_views.shape[0] - 1)
 777        counts = np.bincount(pop_indices, minlength=self._population_views.shape[0])
 778        neurons = np.empty(count, dtype=np.int64)
 779        offset = 0
 780        for pop_idx, pop_count in enumerate(counts):
 781            if pop_count == 0:
 782                continue
 783            start, end = self._population_views[pop_idx]
 784            neurons[offset:offset + pop_count] = np.random.randint(start, end, size=pop_count)
 785            offset += pop_count
 786        np.random.shuffle(neurons)
 787        return neurons
 788
 789    def _update_batch_dense(self, neurons: np.ndarray, log_states, log_enabled: bool, log_offset: int):
 790        if neurons.size == 0:
 791            return
 792        diff_enabled = self._diff_log_updates is not None and self._diff_log_deltas is not None
 793        if diff_enabled:
 794            diff_updates = self._diff_log_updates
 795            diff_deltas = self._diff_log_deltas
 796            diff_offset = self._diff_log_index
 797        else:
 798            diff_updates = self._diff_log_dummy_updates
 799            diff_deltas = self._diff_log_dummy_deltas
 800            diff_offset = 0
 801        _dense_batch_kernel(
 802            neurons,
 803            self.state,
 804            self.field,
 805            self.thresholds,
 806            self.weights_dense,
 807            log_states,
 808            log_enabled,
 809            log_offset,
 810            diff_updates,
 811            diff_deltas,
 812            diff_enabled,
 813            diff_offset,
 814        )
 815
 816    def _update_batch_sparse(self, neurons: np.ndarray, log_states, log_enabled: bool, log_offset: int):
 817        if neurons.size == 0:
 818            return
 819        diff_enabled = self._diff_log_updates is not None and self._diff_log_deltas is not None
 820        if diff_enabled:
 821            diff_updates = self._diff_log_updates
 822            diff_deltas = self._diff_log_deltas
 823            diff_offset = self._diff_log_index
 824        else:
 825            diff_updates = self._diff_log_dummy_updates
 826            diff_deltas = self._diff_log_dummy_deltas
 827            diff_offset = 0
 828        _sparse_batch_kernel(
 829            neurons,
 830            self.state,
 831            self.field,
 832            self.thresholds,
 833            self.weights_csc.data,
 834            self.weights_csc.indices,
 835            self.weights_csc.indptr,
 836            log_states,
 837            log_enabled,
 838            log_offset,
 839            diff_updates,
 840            diff_deltas,
 841            diff_enabled,
 842            diff_offset,
 843        )
 844
 845    def update(self):
 846        neuron = self._select_neurons(1)
 847        self._process_batch(neuron)
 848
 849    def _process_batch(self, neurons: Sequence[int]):
 850        neurons = np.asarray(neurons, dtype=np.int64)
 851        log_enabled = self._step_log_buffer is not None
 852        log_offset = self._step_log_index
 853        if log_enabled and self._step_log_buffer is not None:
 854            if log_offset + neurons.size > self._step_log_buffer.shape[0]:
 855                raise RuntimeError(
 856                    "Step logging buffer exhausted. Increase allocated steps when enabling step logging."
 857                )
 858            log_buffer = self._step_log_buffer
 859        else:
 860            log_buffer = self._step_log_dummy
 861        if self._diff_log_updates is not None and self._diff_log_deltas is not None:
 862            if self._diff_log_index + neurons.size > self._diff_log_updates.shape[0]:
 863                raise RuntimeError("Diff logging buffer exhausted. Increase allocated steps when enabling diff logging.")
 864        if self.weight_mode == "dense":
 865            self._update_batch_dense(neurons, log_buffer, log_enabled, log_offset)
 866        else:
 867            self._update_batch_sparse(neurons, log_buffer, log_enabled, log_offset)
 868        if log_enabled:
 869            self._step_log_index += neurons.size
 870        if self._diff_log_updates is not None and self._diff_log_deltas is not None:
 871            self._diff_log_index += neurons.size
 872        self.sim_steps += neurons.size
 873
 874    def run(self, steps=1000, batch_size=1):
 875        """Advance the network for `steps` asynchronous updates.
 876
 877        Examples
 878        --------
 879        >>> np.random.seed(2)
 880        >>> net = BinaryNetwork("run-demo")
 881        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
 882        >>> net.add_population(pop)
 883        <...BinaryNeuronPopulation object...>
 884        >>> net.initialize(weight_mode="dense")
 885        >>> net.run(steps=4, batch_size=2)
 886        >>> int(net.sim_steps)
 887        4
 888        """
 889        if self.state is None or self.update_prob is None:
 890            raise RuntimeError("Call initialize() before run().")
 891        if batch_size <= 0:
 892            raise ValueError("batch_size must be positive.")
 893        steps_done = 0
 894        while steps_done < steps:
 895            current_batch = min(batch_size, steps - steps_done)
 896            neurons = self._select_neurons(current_batch)
 897            self._process_batch(neurons)
 898            steps_done += current_batch
 899
 900    def enable_step_logging(self, steps: int):
 901        steps = int(max(0, steps))
 902        if steps == 0:
 903            self._step_log_buffer = None
 904            self._step_log_index = 0
 905            return
 906        self._step_log_buffer = np.zeros((steps, self.N), dtype=np.int8)
 907        self._step_log_index = 0
 908
 909    def consume_step_log(self) -> np.ndarray:
 910        if self._step_log_buffer is None:
 911            return np.zeros((0, self.N), dtype=np.int8)
 912        filled = min(self._step_log_index, self._step_log_buffer.shape[0])
 913        data = self._step_log_buffer[:filled].copy()
 914        self._step_log_buffer = None
 915        self._step_log_index = 0
 916        return data
 917
 918    def enable_diff_logging(self, steps: int):
 919        """Allocate a diff-log buffer for `steps` asynchronous updates.
 920
 921        Examples
 922        --------
 923        >>> net = BinaryNetwork("log-demo")
 924        >>> pop = BinaryNeuronPopulation(net, N=1, threshold=0.0, initializer=[0])
 925        >>> net.add_population(pop)
 926        <...BinaryNeuronPopulation object...>
 927        >>> net.initialize(weight_mode="dense")
 928        >>> net.enable_diff_logging(steps=3)
 929        >>> net._diff_log_updates.shape
 930        (3,)
 931        """
 932        steps = int(max(0, steps))
 933        if steps == 0:
 934            self._diff_log_updates = None
 935            self._diff_log_deltas = None
 936            self._diff_log_index = 0
 937            return
 938        self._diff_log_updates = np.zeros(steps, dtype=np.uint16)
 939        self._diff_log_deltas = np.zeros(steps, dtype=np.int8)
 940        self._diff_log_index = 0
 941
 942    def consume_diff_log(self) -> tuple[np.ndarray, np.ndarray]:
 943        """Return the recorded diff log as `(updates, deltas)` row arrays.
 944
 945        Expected output
 946        ---------------
 947        The returned arrays have shape `(1, recorded_steps)` for the current
 948        simulation backend.
 949        """
 950        if self._diff_log_updates is None or self._diff_log_deltas is None:
 951            return np.zeros((1, 0), dtype=np.uint16), np.zeros((1, 0), dtype=np.int8)
 952        filled = min(self._diff_log_index, self._diff_log_updates.shape[0])
 953        updates = self._diff_log_updates[:filled].copy()[None, :]
 954        deltas = self._diff_log_deltas[:filled].copy()[None, :]
 955        self._diff_log_updates = None
 956        self._diff_log_deltas = None
 957        self._diff_log_index = 0
 958        return updates, deltas
 959
 960    @staticmethod
 961    def reconstruct_states_from_diff_logs(
 962        initial_state: np.ndarray,
 963        updates: np.ndarray,
 964        deltas: np.ndarray,
 965        *,
 966        sample_interval: int = 1,
 967    ) -> np.ndarray:
 968        """Reconstruct sampled full network states from diff-log traces.
 969
 970        Examples
 971        --------
 972        >>> BinaryNetwork.reconstruct_states_from_diff_logs(
 973        ...     initial_state=np.array([0, 0], dtype=np.uint8),
 974        ...     updates=np.array([[0, 1, 0]], dtype=np.uint16),
 975        ...     deltas=np.array([[1, 1, -1]], dtype=np.int8),
 976        ... )
 977        array([[1, 0],
 978               [1, 1],
 979               [0, 1]], dtype=uint8)
 980        """
 981        update_arr = np.asarray(updates, dtype=np.int64)
 982        delta_arr = np.asarray(deltas, dtype=np.int8)
 983        if update_arr.ndim == 1:
 984            update_arr = update_arr[None, :]
 985        if delta_arr.ndim == 1:
 986            delta_arr = delta_arr[None, :]
 987        state = np.asarray(initial_state, dtype=np.int8).ravel().copy()
 988        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
 989            return np.zeros((0, state.size), dtype=np.uint8)
 990        stride = max(1, int(sample_interval))
 991        return _reconstruct_states_kernel(state, update_arr, delta_arr, stride)
 992
    def population_rates_from_diff_logs(
        self,
        initial_state: np.ndarray,
        updates: np.ndarray,
        deltas: np.ndarray,
        *,
        sample_interval: int = 1,
        populations: Optional[Sequence[Neuron]] = None,
    ) -> np.ndarray:
        """Compute per-population activity traces directly from diff logs.

        Parameters
        ----------
        initial_state : np.ndarray
            Full binary state vector at the start of the recorded trace.
        updates, deltas : np.ndarray
            Diff-log traces; 1-D inputs are promoted to shape (1, steps).
        sample_interval : int, optional
            Stride (in updates) between emitted rate samples; clamped to >= 1.
        populations : sequence, optional
            Populations to report; defaults to all populations of the network.

        Returns
        -------
        np.ndarray
            Per-population mean-activity traces; empty arrays are returned for
            missing populations or mismatched trace shapes (no exception).

        Examples
        --------
        >>> net = BinaryNetwork("rates-demo")
        >>> pop_a = BinaryNeuronPopulation(net, N=1, initializer=[0])
        >>> pop_b = BinaryNeuronPopulation(net, N=1, initializer=[0])
        >>> net.add_population(pop_a)
        <...BinaryNeuronPopulation object...>
        >>> net.add_population(pop_b)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> rates = net.population_rates_from_diff_logs(
        ...     np.array([0, 0], dtype=np.uint8),
        ...     np.array([[0, 1, 0]], dtype=np.uint16),
        ...     np.array([[1, 1, -1]], dtype=np.int8),
        ... )
        >>> rates.tolist()
        [[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
        """
        pops = list(self.population if populations is None else populations)
        if not pops:
            return np.zeros((0, 0), dtype=np.float32)
        update_arr = np.asarray(updates, dtype=np.int64)
        delta_arr = np.asarray(deltas, dtype=np.int8)
        # Promote 1-D traces to the (rows, steps) layout the kernel expects.
        if update_arr.ndim == 1:
            update_arr = update_arr[None, :]
        if delta_arr.ndim == 1:
            delta_arr = delta_arr[None, :]
        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
            return np.zeros((0, len(pops)), dtype=np.float32)
        initial = np.asarray(initial_state, dtype=np.int8).ravel()
        pop_count = len(pops)
        # Per-population size (clamped to >= 1 to avoid division by zero),
        # neuron->population lookup, and initial active counts for the kernel.
        sizes = np.zeros(pop_count, dtype=np.float32)
        cluster_of = np.empty(initial.size, dtype=np.int32)
        cluster_sums = np.zeros(pop_count, dtype=np.int64)
        for idx, pop in enumerate(pops):
            # pop.view holds the population's index range — assumed to be a
            # (start, end) half-open pair; TODO confirm against Neuron class.
            start, end = int(pop.view[0]), int(pop.view[1])
            cluster_of[start:end] = idx
            sizes[idx] = max(1, end - start)
            cluster_sums[idx] = int(initial[start:end].sum())
        stride = max(1, int(sample_interval))
        return _population_rates_kernel(cluster_sums, sizes, cluster_of, update_arr, delta_arr, stride)
1045
1046    @staticmethod
1047    def extract_spike_events_from_diff_logs(
1048        updates: np.ndarray,
1049        deltas: np.ndarray,
1050    ) -> tuple[np.ndarray, np.ndarray]:
1051        """Extract onset events `(times, neuron_ids)` from diff-log traces.
1052
1053        Examples
1054        --------
1055        >>> times, ids = BinaryNetwork.extract_spike_events_from_diff_logs(
1056        ...     np.array([[0, 1, 0]], dtype=np.uint16),
1057        ...     np.array([[1, 1, -1]], dtype=np.int8),
1058        ... )
1059        >>> times.tolist()
1060        [0.0, 1.0]
1061        >>> ids.tolist()
1062        [0, 1]
1063        """
1064        update_arr = np.asarray(updates, dtype=np.int64)
1065        delta_arr = np.asarray(deltas, dtype=np.int8)
1066        if update_arr.ndim == 1:
1067            update_arr = update_arr[None, :]
1068        if delta_arr.ndim == 1:
1069            delta_arr = delta_arr[None, :]
1070        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
1071            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
1072        mask = delta_arr > 0
1073        if not mask.any():
1074            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
1075        per_step, steps = update_arr.shape
1076        times = np.repeat(np.arange(steps, dtype=np.int64), per_step)
1077        flat_updates = update_arr.reshape(-1, order="F")
1078        flat_mask = mask.reshape(-1, order="F")
1079        return times[flat_mask].astype(np.float64, copy=False), flat_updates[flat_mask].astype(np.int64, copy=False)
1080
1081
def _build_demo_network(weight_mode: str) -> BinaryNetwork:
    """Construct the small two-population E/I demo network used by __main__.

    The construction order (populations, then synapses, then initialize) is
    fixed so repeated builds under the same seed are reproducible.
    """
    net = BinaryNetwork(f"demo-{weight_mode}")
    exc = BinaryNeuronPopulation(net, N=4, threshold=0.5, tau=5.0)
    inh = BinaryNeuronPopulation(net, N=4, threshold=0.5, tau=5.0)
    for pop in (exc, inh):
        net.add_population(pop)
    for pre, post, p, j in (
        (exc, exc, 0.2, 1.0),
        (inh, inh, 0.2, 1.0),
        (exc, inh, 0.5, 1.1),
        (inh, exc, 0.5, -0.8),
    ):
        net.add_synapse(PairwiseBernoulliSynapse(net, pre, post, p=p, j=j))
    net.initialize(weight_mode=weight_mode, autapse=False, ram_budget_gb=0.1)
    return net
1094
1095
if __name__ == "__main__":
    # Deterministic demo run of both weight backends.
    np.random.seed(0)
    print("Running dense backend demo...")
    demo = _build_demo_network("dense")
    demo.run(steps=20, batch_size=4)
    print("Dense state:", demo.state)
    if sp is None:
        # SciPy is an optional dependency; the sparse backend needs it.
        print("SciPy not available; skipping sparse demo.")
    else:
        print("Running sparse backend demo...")
        demo_sparse = _build_demo_network("sparse")
        demo_sparse.run(steps=20, batch_size=4)
        print("Sparse state:", demo_sparse.state)
class BinaryNeuronPopulation(Neuron):
291class BinaryNeuronPopulation(Neuron):
292    """Binary neuron population with threshold activation and configurable initializer.
293
294    Examples
295    --------
296    >>> net = BinaryNetwork("demo")
297    >>> pop = BinaryNeuronPopulation(net, N=3, threshold=0.5, initializer=0)
298    >>> net.add_population(pop)
299    <...BinaryNeuronPopulation object...>
300    """
301    def __init__(
302        self,
303        reference,
304        N=1,
305        threshold=1.0,
306        name="Binary Neuron Population",
307        tau=1.0,
308        initializer=None,
309        **kwargs,
310    ):
311        super().__init__(reference, N, name, tau=tau)
312        self.threshold = float(threshold)
313        self.initializer = initializer
314
315    def update(self, weights=None, state=None, index=None, input_value=None, **kwargs):
316        if input_value is None:
317            if weights is None or state is None:
318                raise ValueError("weights and state have to be provided when input_value is not set")
319            input_value = np.sum(weights * state)
320        return np.heaviside(input_value - self.threshold, 0)
321
322    def _initial_state(self):
323        if callable(self.initializer):
324            values = np.asarray(self.initializer(self.N), dtype=np.int16)
325            if values.size != self.N:
326                raise ValueError("Initializer must return {} entries".format(self.N))
327            return values
328        if self.initializer is None:
329            return np.random.choice([0, 1], size=self.N, p=[0.8, 0.2])
330        arr = np.asarray(self.initializer)
331        if arr.size == 1:
332            return np.full(self.N, int(arr.item()), dtype=np.int16)
333        if arr.size != self.N:
334            raise ValueError("Initializer must define exactly {} elements".format(self.N))
335        return arr.astype(np.int16)

Binary neuron population with threshold activation and configurable initializer.

Examples
>>> net = BinaryNetwork("demo")
>>> pop = BinaryNeuronPopulation(net, N=3, threshold=0.5, initializer=0)
>>> net.add_population(pop)
<...BinaryNeuronPopulation object...>
BinaryNeuronPopulation(reference, N=1, threshold=1.0, name='Binary Neuron Population', tau=1.0, initializer=None, **kwargs)
301    def __init__(
302        self,
303        reference,
304        N=1,
305        threshold=1.0,
306        name="Binary Neuron Population",
307        tau=1.0,
308        initializer=None,
309        **kwargs,
310    ):
311        super().__init__(reference, N, name, tau=tau)
312        self.threshold = float(threshold)
313        self.initializer = initializer
threshold
initializer
def update( self, weights=None, state=None, index=None, input_value=None, **kwargs):
315    def update(self, weights=None, state=None, index=None, input_value=None, **kwargs):
316        if input_value is None:
317            if weights is None or state is None:
318                raise ValueError("weights and state have to be provided when input_value is not set")
319            input_value = np.sum(weights * state)
320        return np.heaviside(input_value - self.threshold, 0)
class BackgroundActivity(Neuron):
338class BackgroundActivity(Neuron):
339    """Background population that provides a constant or stochastic drive."""
340    def __init__(self, reference, N=1, Activity=0.5, Stochastic=False, name="Background Activity", tau=1.0):
341        super().__init__(reference, N, name, tau=tau)
342        self.Activity = Activity
343        if Stochastic:
344            self.update = self.update_stochastic
345        else:
346            self.update = self.update_deterministic
347
348    def update_stochastic(self, weights=None, state=None, Index=None, **kwargs):
349        return np.random.choice([0, 1], 1) * self.update_deterministic(weights, state, **kwargs)
350
351    def update_deterministic(self, weights=None, state=None, Index=None, **kwargs):
352        # if activity is a float, set all neurons to this activity
353        if isinstance(self.Activity, float):
354            return self.Activity
355        # if activity is a function, set neurons by this function
356        elif callable(self.Activity):
357            return self.Activity()
358        else:
359            return 1.0
360
361    def initialze(self):
362        self.state = np.array([self.update() for _ in range(self.N)])

Background population that provides a constant or stochastic drive.

BackgroundActivity(reference, N=1, Activity=0.5, Stochastic=False, name='Background Activity', tau=1.0)
340    def __init__(self, reference, N=1, Activity=0.5, Stochastic=False, name="Background Activity", tau=1.0):
341        super().__init__(reference, N, name, tau=tau)
342        self.Activity = Activity
343        if Stochastic:
344            self.update = self.update_stochastic
345        else:
346            self.update = self.update_deterministic
Activity
def update_stochastic(self, weights=None, state=None, Index=None, **kwargs):
348    def update_stochastic(self, weights=None, state=None, Index=None, **kwargs):
349        return np.random.choice([0, 1], 1) * self.update_deterministic(weights, state, **kwargs)
def update_deterministic(self, weights=None, state=None, Index=None, **kwargs):
351    def update_deterministic(self, weights=None, state=None, Index=None, **kwargs):
352        # if activity is a float, set all neurons to this activity
353        if isinstance(self.Activity, float):
354            return self.Activity
355        # if activity is a function, set neurons by this function
356        elif callable(self.Activity):
357            return self.Activity()
358        else:
359            return 1.0
def initialze(self):
361    def initialze(self):
362        self.state = np.array([self.update() for _ in range(self.N)])
class PairwiseBernoulliSynapse(Synapse):
398class PairwiseBernoulliSynapse(Synapse):
399    """Sample independent Bernoulli connections for every pre/post pair.
400
401    Expected output
402    ---------------
403    After `network.initialize(...)`, the addressed weight block contains zeros
404    and `j`-valued entries sampled independently with probability `p`.
405    """
406    def __init__(self, reference, pre, post, p=0.5, j=1.0):
407        super().__init__(reference, pre, post)
408        self.p = float(p)
409        self.j = float(j)
410
411    def initialze(self):
412        p = self.p
413        iterations = 1
414        while p > 1:
415            p /= 2.0
416            iterations += 1
417        if self.reference.weight_mode == "sparse":
418            total_pairs = self.post.N * self.pre.N
419            if total_pairs == 0 or p <= 0.0:
420                return
421            data_chunks: List[np.ndarray] = []
422            row_chunks: List[np.ndarray] = []
423            col_chunks: List[np.ndarray] = []
424            for _ in range(iterations):
425                sample_count = int(np.random.binomial(total_pairs, p))
426                if sample_count <= 0:
427                    continue
428                if sample_count >= total_pairs:
429                    flat = np.arange(total_pairs, dtype=np.int64)
430                else:
431                    flat = np.random.choice(total_pairs, size=sample_count, replace=False).astype(np.int64, copy=False)
432                rows = flat // self.pre.N
433                cols = flat % self.pre.N
434                row_chunks.append(rows)
435                col_chunks.append(cols)
436                data_chunks.append(np.full(sample_count, self.j, dtype=self.reference.weight_dtype))
437            if data_chunks:
438                self._append_sparse_entries(
439                    np.concatenate(row_chunks),
440                    np.concatenate(col_chunks),
441                    np.concatenate(data_chunks),
442                )
443            return
444        shape = (self.post.N, self.pre.N)
445        block = np.zeros(shape, dtype=self.reference.weight_dtype)
446        for _ in range(iterations):
447            draws = (np.random.random(size=shape) < p).astype(block.dtype, copy=False)
448            block += draws * self.j
449        self._write_block(block)

Sample independent Bernoulli connections for every pre/post pair.

Expected output

After network.initialize(...), the addressed weight block contains zeros and j-valued entries sampled independently with probability p.

PairwiseBernoulliSynapse(reference, pre, post, p=0.5, j=1.0)
406    def __init__(self, reference, pre, post, p=0.5, j=1.0):
407        super().__init__(reference, pre, post)
408        self.p = float(p)
409        self.j = float(j)
p
j
def initialze(self):
411    def initialze(self):
412        p = self.p
413        iterations = 1
414        while p > 1:
415            p /= 2.0
416            iterations += 1
417        if self.reference.weight_mode == "sparse":
418            total_pairs = self.post.N * self.pre.N
419            if total_pairs == 0 or p <= 0.0:
420                return
421            data_chunks: List[np.ndarray] = []
422            row_chunks: List[np.ndarray] = []
423            col_chunks: List[np.ndarray] = []
424            for _ in range(iterations):
425                sample_count = int(np.random.binomial(total_pairs, p))
426                if sample_count <= 0:
427                    continue
428                if sample_count >= total_pairs:
429                    flat = np.arange(total_pairs, dtype=np.int64)
430                else:
431                    flat = np.random.choice(total_pairs, size=sample_count, replace=False).astype(np.int64, copy=False)
432                rows = flat // self.pre.N
433                cols = flat % self.pre.N
434                row_chunks.append(rows)
435                col_chunks.append(cols)
436                data_chunks.append(np.full(sample_count, self.j, dtype=self.reference.weight_dtype))
437            if data_chunks:
438                self._append_sparse_entries(
439                    np.concatenate(row_chunks),
440                    np.concatenate(col_chunks),
441                    np.concatenate(data_chunks),
442                )
443            return
444        shape = (self.post.N, self.pre.N)
445        block = np.zeros(shape, dtype=self.reference.weight_dtype)
446        for _ in range(iterations):
447            draws = (np.random.random(size=shape) < p).astype(block.dtype, copy=False)
448            block += draws * self.j
449        self._write_block(block)
class PoissonSynapse(Synapse):
452class PoissonSynapse(Synapse):
453    """Sample Poisson-distributed multi-edge counts per pre/post pair.
454
455    Expected output
456    ---------------
457    The addressed weight block contains non-negative multiples of `j` with
458    Poisson-distributed counts.
459    """
460    def __init__(self, reference, pre, post, rate=0.5, j=1.0):
461        super().__init__(reference, pre, post)
462        self.rate = float(rate)
463        self.j = float(j)
464
465    def initialze(self):
466        if self.reference.weight_mode == "sparse":
467            total_pairs = self.post.N * self.pre.N
468            if total_pairs == 0 or self.rate <= 0.0:
469                return
470            sample_count = int(np.random.poisson(lam=self.rate * total_pairs))
471            if sample_count <= 0:
472                return
473            flat = np.random.randint(total_pairs, size=sample_count, dtype=np.int64)
474            unique, counts = _compress_flat_samples(flat, total_pairs)
475            rows = unique // self.pre.N
476            cols = unique % self.pre.N
477            values = counts.astype(self.reference.weight_dtype, copy=False) * self.j
478            self._append_sparse_entries(rows, cols, values)
479            return
480        shape = (self.post.N, self.pre.N)
481        samples = np.random.poisson(lam=self.rate, size=shape).astype(self.reference.weight_dtype, copy=False)
482        self._write_block(samples * self.j)

Sample Poisson-distributed multi-edge counts per pre/post pair.

Expected output

The addressed weight block contains non-negative multiples of j with Poisson-distributed counts.

PoissonSynapse(reference, pre, post, rate=0.5, j=1.0)
460    def __init__(self, reference, pre, post, rate=0.5, j=1.0):
461        super().__init__(reference, pre, post)
462        self.rate = float(rate)
463        self.j = float(j)
rate
j
def initialze(self):
465    def initialze(self):
466        if self.reference.weight_mode == "sparse":
467            total_pairs = self.post.N * self.pre.N
468            if total_pairs == 0 or self.rate <= 0.0:
469                return
470            sample_count = int(np.random.poisson(lam=self.rate * total_pairs))
471            if sample_count <= 0:
472                return
473            flat = np.random.randint(total_pairs, size=sample_count, dtype=np.int64)
474            unique, counts = _compress_flat_samples(flat, total_pairs)
475            rows = unique // self.pre.N
476            cols = unique % self.pre.N
477            values = counts.astype(self.reference.weight_dtype, copy=False) * self.j
478            self._append_sparse_entries(rows, cols, values)
479            return
480        shape = (self.post.N, self.pre.N)
481        samples = np.random.poisson(lam=self.rate, size=shape).astype(self.reference.weight_dtype, copy=False)
482        self._write_block(samples * self.j)
class FixedIndegreeSynapse(Synapse):
485class FixedIndegreeSynapse(Synapse):
486    """Connect each target neuron to a fixed number of randomly drawn inputs.
487
488    Expected output
489    ---------------
490    Each target row receives approximately `round(p * N_pre)` incoming entries.
491    When `multapses` is `True`, presynaptic partners are sampled with replacement.
492    When `multapses` is `False`, partners are sampled without replacement.
493    """
494    def __init__(self, reference, pre, post, p=0.5, j=1.0, multapses=True):
495        super().__init__(reference, pre, post)
496        self.p = float(p)
497        self.j = float(j)
498        self.multapses = bool(multapses)
499
500    def initialze(self):
501        p = max(self.p, 0.0)
502        target_count = int(round(p * self.pre.N))
503        target_count = max(target_count, 0)
504        if target_count == 0:
505            return
506        if (not self.multapses) and target_count > self.pre.N:
507            raise ValueError(
508                f"Fixed-indegree sampling without multapses requested indegree {target_count} "
509                f"from only {self.pre.N} presynaptic neurons."
510            )
511        if self.reference.weight_mode == "sparse":
512            rows = np.repeat(np.arange(self.post.N, dtype=np.int64), target_count)
513            if self.multapses:
514                cols = np.random.randint(self.pre.N, size=self.post.N * target_count, dtype=np.int64)
515            else:
516                col_chunks = [
517                    np.random.choice(self.pre.N, size=target_count, replace=False).astype(np.int64, copy=False)
518                    for _ in range(self.post.N)
519                ]
520                cols = np.concatenate(col_chunks) if col_chunks else np.zeros(0, dtype=np.int64)
521            values = np.full(rows.size, self.j, dtype=self.reference.weight_dtype)
522            self._append_sparse_entries(rows, cols, values)
523            return
524        block = np.zeros((self.post.N, self.pre.N), dtype=self.reference.weight_dtype)
525        for tgt in range(self.post.N):
526            pres = np.random.choice(self.pre.N, size=target_count, replace=self.multapses)
527            np.add.at(block[tgt], pres, self.j)
528        self._write_block(block)

Connect each target neuron to a fixed number of randomly drawn inputs.

Expected output

Each target row receives approximately round(p * N_pre) incoming entries. When multapses is True, presynaptic partners are sampled with replacement. When multapses is False, partners are sampled without replacement.

FixedIndegreeSynapse(reference, pre, post, p=0.5, j=1.0, multapses=True)
494    def __init__(self, reference, pre, post, p=0.5, j=1.0, multapses=True):
495        super().__init__(reference, pre, post)
496        self.p = float(p)
497        self.j = float(j)
498        self.multapses = bool(multapses)
p
j
multapses
def initialze(self):
500    def initialze(self):
501        p = max(self.p, 0.0)
502        target_count = int(round(p * self.pre.N))
503        target_count = max(target_count, 0)
504        if target_count == 0:
505            return
506        if (not self.multapses) and target_count > self.pre.N:
507            raise ValueError(
508                f"Fixed-indegree sampling without multapses requested indegree {target_count} "
509                f"from only {self.pre.N} presynaptic neurons."
510            )
511        if self.reference.weight_mode == "sparse":
512            rows = np.repeat(np.arange(self.post.N, dtype=np.int64), target_count)
513            if self.multapses:
514                cols = np.random.randint(self.pre.N, size=self.post.N * target_count, dtype=np.int64)
515            else:
516                col_chunks = [
517                    np.random.choice(self.pre.N, size=target_count, replace=False).astype(np.int64, copy=False)
518                    for _ in range(self.post.N)
519                ]
520                cols = np.concatenate(col_chunks) if col_chunks else np.zeros(0, dtype=np.int64)
521            values = np.full(rows.size, self.j, dtype=self.reference.weight_dtype)
522            self._append_sparse_entries(rows, cols, values)
523            return
524        block = np.zeros((self.post.N, self.pre.N), dtype=self.reference.weight_dtype)
525        for tgt in range(self.post.N):
526            pres = np.random.choice(self.pre.N, size=target_count, replace=self.multapses)
527            np.add.at(block[tgt], pres, self.j)
528        self._write_block(block)
class AllToAllSynapse(Synapse):
    """Constant-weight synapse connecting every presynaptic/postsynaptic pair.

    Expected output
    ---------------
    The addressed weight block is filled with the constant value `j`.
    """

    def __init__(self, reference, pre, post, j=1.0):
        super().__init__(reference, pre, post)
        self.j = float(j)

    def initialze(self):
        """Fill the full pre-by-post block with the constant weight `j`."""
        n_rows = self.post.N
        n_cols = self.pre.N
        if self.reference.weight_mode != "sparse":
            self._write_block(
                np.full((n_rows, n_cols), self.j, dtype=self.reference.weight_dtype)
            )
            return
        if n_rows * n_cols == 0:
            return
        # Enumerate every (row, col) pair explicitly for the COO backend.
        row_idx = np.repeat(np.arange(n_rows, dtype=np.int64), n_cols)
        col_idx = np.tile(np.arange(n_cols, dtype=np.int64), n_rows)
        weights = np.full(n_rows * n_cols, self.j, dtype=self.reference.weight_dtype)
        self._append_sparse_entries(row_idx, col_idx, weights)

Dense all-to-all block with constant weight value.

Expected output

The addressed weight block is filled with the constant value j.

AllToAllSynapse(reference, pre, post, j=1.0)
538    def __init__(self, reference, pre, post, j=1.0):
539        super().__init__(reference, pre, post)
540        self.j = float(j)
j
def initialze(self):
542    def initialze(self):
543        if self.reference.weight_mode == "sparse":
544            total_pairs = self.post.N * self.pre.N
545            if total_pairs == 0:
546                return
547            rows = np.repeat(np.arange(self.post.N, dtype=np.int64), self.pre.N)
548            cols = np.tile(np.arange(self.pre.N, dtype=np.int64), self.post.N)
549            values = np.full(total_pairs, self.j, dtype=self.reference.weight_dtype)
550            self._append_sparse_entries(rows, cols, values)
551            return
552        block = np.full((self.post.N, self.pre.N), self.j, dtype=self.reference.weight_dtype)
553        self._write_block(block)
class BinaryNetwork:
    """Binary network simulator with dense/sparse backends and diff-log tracing.

    Examples
    --------
    >>> np.random.seed(0)
    >>> net = BinaryNetwork("demo")
    >>> exc = BinaryNeuronPopulation(net, N=2, threshold=0.5, tau=5.0, initializer=[0, 1])
    >>> inh = BinaryNeuronPopulation(net, N=1, threshold=0.5, tau=5.0, initializer=[0])
    >>> net.add_population(exc)
    <...BinaryNeuronPopulation object...>
    >>> net.add_population(inh)
    <...BinaryNeuronPopulation object...>
    >>> net.add_synapse(AllToAllSynapse(net, exc, exc, j=0.6))
    >>> net.initialize(weight_mode="dense", autapse=False)
    >>> net.state.shape
    (3,)
    """

    def __init__(self, name="Some Binary Network"):
        self.name = name
        # Total neuron count; grows as populations are registered.
        self.N = 0
        self.population: List[Neuron] = []
        self.synapses: List[Synapse] = []
        # Flat binary state vector (int8); allocated by initialize().
        self.state: Optional[np.ndarray] = None
        self.weights_dense: Optional[np.ndarray] = None
        self.weights_csr = None
        self.weights_csc = None
        self.weights = None  # compatibility alias for the active weight matrix
        self.LUT = None  # look up table for the update function
        self.sim_steps = 0
        self.population_lookup = None
        self.neuron_lookup = None
        # Per-neuron selection probability (normalized 1/tau per neuron).
        self.update_prob = None
        self.thresholds = None
        # Cached synaptic field h = W @ state, kept in sync by the kernels.
        self.field = None
        self.weight_mode = "dense"
        self.weight_dtype = np.float32
        self._population_views = np.zeros((0, 2), dtype=np.int64)
        self._population_cdf = np.zeros(0, dtype=np.float64)
        # Pending COO triplets collected while synapses initialize (sparse mode).
        self._sparse_rows: List[np.ndarray] = []
        self._sparse_cols: List[np.ndarray] = []
        self._sparse_data: List[np.ndarray] = []
        self._step_log_buffer: Optional[np.ndarray] = None
        self._step_log_index = 0
        self._step_log_dummy = np.zeros((0, 0), dtype=np.int8)
        self._diff_log_updates: Optional[np.ndarray] = None
        self._diff_log_deltas: Optional[np.ndarray] = None
        self._diff_log_index = 0
        # Zero-size placeholders keep the numba kernel signatures stable
        # when logging is disabled.
        self._diff_log_dummy_updates = np.zeros(0, dtype=np.uint16)
        self._diff_log_dummy_deltas = np.zeros(0, dtype=np.int8)

    def add_population(self, population: Neuron):
        """Register a population and grow the network size; returns it for chaining."""
        self.population.append(population)
        self.N += population.N
        return population

    def add_synapse(self, synapse: Synapse):
        """Register a synapse generator to be sampled during initialize()."""
        self.synapses.append(synapse)

    def initialize(
        self,
        autapse: bool = False,
        weight_mode: str = "auto",
        ram_budget_gb: float = 12.0,
        weight_dtype=np.float32,
    ):
        """Allocate state, sample connectivity, and prepare the cached input field.

        Parameters
        ----------
        autapse : bool
            When False, self-connections (diagonal weights) are removed.
        weight_mode : str
            'dense', 'sparse', or 'auto' (picked from the RAM budget).
        ram_budget_gb : float
            Budget used by the 'auto' weight-mode heuristic.
        weight_dtype : dtype
            float32 or float64 storage type for weights and fields.

        Examples
        --------
        >>> np.random.seed(1)
        >>> net = BinaryNetwork("init-demo")
        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
        >>> net.add_population(pop)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> net.field.shape
        (2,)
        """
        if self.N == 0:
            raise RuntimeError("Cannot initialize network without populations.")
        self.weight_dtype = np.dtype(weight_dtype)
        if self.weight_dtype not in (np.float32, np.float64):
            raise ValueError("weight_dtype must be float32 or float64.")
        self.state = np.zeros(self.N, dtype=np.int8)
        self.field = np.zeros(self.N, dtype=self.weight_dtype)
        self.update_prob = np.zeros(self.N, dtype=self.weight_dtype)
        self.population_lookup = np.zeros(self.N, dtype=np.int32)
        self.neuron_lookup = np.zeros(self.N, dtype=np.int32)
        self.thresholds = np.zeros(self.N, dtype=self.weight_dtype)
        pop_count = len(self.population)
        pop_views = np.zeros((pop_count, 2), dtype=np.int64)
        pop_update_mass = np.zeros(pop_count, dtype=np.float64)
        N_start = 0
        for idx, population in enumerate(self.population):
            population.set_view([N_start, N_start + population.N])
            N_start += population.N
            population.initialze()
            pop_views[idx, :] = population.view
            # Update mass ~ N / tau: faster populations are drawn more often.
            pop_update_mass[idx] = population.N / max(population.tau, 1e-9)
            self.update_prob[population.view[0]:population.view[1]] = 1.0 / max(population.tau, 1e-9)
            self.population_lookup[population.view[0]:population.view[1]] = idx
            self.neuron_lookup[population.view[0]:population.view[1]] = np.arange(
                population.N, dtype=np.int32
            )
            threshold_value = getattr(population, "threshold", 0.0)
            self.thresholds[population.view[0]:population.view[1]] = float(threshold_value)
        self.LUT = np.array([population.view for population in self.population])
        total = float(self.update_prob.sum())
        if not math.isfinite(total) or total <= 0:
            raise RuntimeError("Invalid update probabilities. Check tau values.")
        self.update_prob /= total
        pop_mass_total = float(pop_update_mass.sum())
        if not math.isfinite(pop_mass_total) or pop_mass_total <= 0:
            raise RuntimeError("Invalid population update masses. Check tau values.")
        self._population_views = pop_views
        self._population_cdf = np.cumsum(pop_update_mass / pop_mass_total)
        self.weight_mode = self._choose_weight_mode(weight_mode, ram_budget_gb)
        self.weights_dense = None
        self.weights_csr = None
        self.weights_csc = None
        self.weights = None
        self._sparse_rows.clear()
        self._sparse_cols.clear()
        self._sparse_data.clear()
        if self.weight_mode == "dense":
            self.weights_dense = np.zeros((self.N, self.N), dtype=self.weight_dtype)
        else:
            if sp is None:
                raise ModuleNotFoundError(
                    "SciPy is required for sparse weight mode. Install it via 'pip install scipy'."
                )
        for synapse in self.synapses:
            synapse.set_view(
                np.array(
                    [[synapse.pre.view[0], synapse.pre.view[1]], [synapse.post.view[0], synapse.post.view[1]]],
                    dtype=np.int64,
                )
            )
            synapse.initialze()
        if self.weight_mode == "dense":
            if not autapse:
                np.fill_diagonal(self.weights_dense, 0.0)
            self.weights = self.weights_dense
        else:
            row = np.concatenate(self._sparse_rows) if self._sparse_rows else np.zeros(0, dtype=np.int64)
            col = np.concatenate(self._sparse_cols) if self._sparse_cols else np.zeros(0, dtype=np.int64)
            data = np.concatenate(self._sparse_data) if self._sparse_data else np.zeros(0, dtype=self.weight_dtype)
            if not autapse and row.size:
                keep = row != col
                row = row[keep]
                col = col[keep]
                data = data[keep]
            # CSR for fast field recomputation, CSC for per-neuron column updates.
            matrix = sp.coo_matrix((data, (row, col)), shape=(self.N, self.N), dtype=self.weight_dtype)
            self.weights_csr = matrix.tocsr()
            self.weights_csc = matrix.tocsc()
            self.weights = self.weights_csr
        self._recompute_field()
        self.sim_steps = 0

    def _recompute_field(self):
        """Rebuild the cached synaptic field h = W @ state from scratch."""
        state_float = self.state.astype(self.weight_dtype, copy=False)
        if self.weight_mode == "dense":
            self.field = self.weights_dense @ state_float
        else:
            self.field = self.weights_csr.dot(state_float).astype(self.weight_dtype, copy=False)

    def _write_weight_block(self, row_start, row_end, col_start, col_end, block):
        """Accumulate a dense weight block into the addressed slice (or COO buffers)."""
        block = np.asarray(block, dtype=self.weight_dtype)
        if block.shape != (row_end - row_start, col_end - col_start):
            raise ValueError("Block shape does not match target slice.")
        if self.weight_mode == "dense":
            self.weights_dense[row_start:row_end, col_start:col_end] += block
        else:
            rows = np.arange(row_start, row_end, dtype=np.int64)
            cols = np.arange(col_start, col_end, dtype=np.int64)
            row_idx = np.repeat(rows, block.shape[1])
            col_idx = np.tile(cols, block.shape[0])
            values = block.reshape(-1)
            # Only keep structural non-zeros in the sparse buffers.
            mask = values != 0.0
            if mask.any():
                self._sparse_rows.append(row_idx[mask])
                self._sparse_cols.append(col_idx[mask])
                self._sparse_data.append(values[mask])

    def _append_sparse_entries(self, row_idx: np.ndarray, col_idx: np.ndarray, values: np.ndarray) -> None:
        """Queue COO triplets for the sparse matrix built at the end of initialize()."""
        if self.weight_mode != "sparse":
            raise RuntimeError("Sparse entries can only be appended in sparse weight mode.")
        rows = np.asarray(row_idx, dtype=np.int64).ravel()
        cols = np.asarray(col_idx, dtype=np.int64).ravel()
        data = np.asarray(values, dtype=self.weight_dtype).ravel()
        if rows.size != cols.size or rows.size != data.size:
            raise ValueError("Sparse row/col/data arrays must have the same length.")
        if rows.size == 0:
            return
        mask = data != 0.0
        if mask.any():
            self._sparse_rows.append(rows[mask])
            self._sparse_cols.append(cols[mask])
            self._sparse_data.append(data[mask])

    def _choose_weight_mode(self, requested: str, ram_budget_gb: float) -> str:
        """Resolve 'auto' into 'dense' or 'sparse' based on the RAM budget."""
        requested = str(requested or "auto").lower()
        if requested in {"dense", "sparse"}:
            return requested
        if requested != "auto":
            raise ValueError("weight_mode must be 'dense', 'sparse', or 'auto'.")
        # Estimate dense memory footprint for the configured weight dtype.
        bytes_per_entry = np.dtype(self.weight_dtype).itemsize
        dense_bytes = self.N * self.N * bytes_per_entry
        dense_gb = dense_bytes / (1024 ** 3)
        safety = 0.6 * float(ram_budget_gb)
        # Bug fix: the original concatenated the prefix only onto the 'dense'
        # branch of the conditional expression (precedence), so the sparse
        # case printed just "sparse".
        chosen = "dense" if dense_gb <= safety else "sparse"
        print("Simulating network as: " + chosen)
        return chosen

    def _select_neurons(self, count: int) -> np.ndarray:
        """Sample `count` neuron indices according to the per-neuron update rates."""
        if count <= 0:
            return np.zeros(0, dtype=np.int64)
        if self._population_cdf.size == 0:
            return np.random.choice(self.N, size=count, p=self.update_prob)
        # Two-stage sampling: pick populations via their CDF, then uniform
        # neurons inside each population (rates are uniform within one pop).
        pop_indices = np.searchsorted(self._population_cdf, np.random.random(size=count), side="right")
        pop_indices = np.minimum(pop_indices, self._population_views.shape[0] - 1)
        counts = np.bincount(pop_indices, minlength=self._population_views.shape[0])
        neurons = np.empty(count, dtype=np.int64)
        offset = 0
        for pop_idx, pop_count in enumerate(counts):
            if pop_count == 0:
                continue
            start, end = self._population_views[pop_idx]
            neurons[offset:offset + pop_count] = np.random.randint(start, end, size=pop_count)
            offset += pop_count
        np.random.shuffle(neurons)
        return neurons

    def _diff_log_view(self):
        """Return (updates, deltas, enabled, offset) buffers for the batch kernels."""
        if self._diff_log_updates is not None and self._diff_log_deltas is not None:
            return self._diff_log_updates, self._diff_log_deltas, True, self._diff_log_index
        return self._diff_log_dummy_updates, self._diff_log_dummy_deltas, False, 0

    def _update_batch_dense(self, neurons: np.ndarray, log_states, log_enabled: bool, log_offset: int):
        """Run the numba dense kernel over `neurons`, mutating state/field in place."""
        if neurons.size == 0:
            return
        diff_updates, diff_deltas, diff_enabled, diff_offset = self._diff_log_view()
        _dense_batch_kernel(
            neurons,
            self.state,
            self.field,
            self.thresholds,
            self.weights_dense,
            log_states,
            log_enabled,
            log_offset,
            diff_updates,
            diff_deltas,
            diff_enabled,
            diff_offset,
        )

    def _update_batch_sparse(self, neurons: np.ndarray, log_states, log_enabled: bool, log_offset: int):
        """Run the numba CSC kernel over `neurons`, mutating state/field in place."""
        if neurons.size == 0:
            return
        diff_updates, diff_deltas, diff_enabled, diff_offset = self._diff_log_view()
        _sparse_batch_kernel(
            neurons,
            self.state,
            self.field,
            self.thresholds,
            self.weights_csc.data,
            self.weights_csc.indices,
            self.weights_csc.indptr,
            log_states,
            log_enabled,
            log_offset,
            diff_updates,
            diff_deltas,
            diff_enabled,
            diff_offset,
        )

    def update(self):
        """Perform a single asynchronous neuron update."""
        neuron = self._select_neurons(1)
        self._process_batch(neuron)

    def _process_batch(self, neurons: Sequence[int]):
        """Apply one batch of updates, advancing step/diff logs and counters."""
        neurons = np.asarray(neurons, dtype=np.int64)
        log_enabled = self._step_log_buffer is not None
        log_offset = self._step_log_index
        if log_enabled:
            if log_offset + neurons.size > self._step_log_buffer.shape[0]:
                raise RuntimeError(
                    "Step logging buffer exhausted. Increase allocated steps when enabling step logging."
                )
            log_buffer = self._step_log_buffer
        else:
            log_buffer = self._step_log_dummy
        if self._diff_log_updates is not None and self._diff_log_deltas is not None:
            if self._diff_log_index + neurons.size > self._diff_log_updates.shape[0]:
                raise RuntimeError("Diff logging buffer exhausted. Increase allocated steps when enabling diff logging.")
        if self.weight_mode == "dense":
            self._update_batch_dense(neurons, log_buffer, log_enabled, log_offset)
        else:
            self._update_batch_sparse(neurons, log_buffer, log_enabled, log_offset)
        if log_enabled:
            self._step_log_index += neurons.size
        if self._diff_log_updates is not None and self._diff_log_deltas is not None:
            self._diff_log_index += neurons.size
        self.sim_steps += neurons.size

    def run(self, steps=1000, batch_size=1):
        """Advance the network for `steps` asynchronous updates.

        Examples
        --------
        >>> np.random.seed(2)
        >>> net = BinaryNetwork("run-demo")
        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
        >>> net.add_population(pop)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> net.run(steps=4, batch_size=2)
        >>> int(net.sim_steps)
        4
        """
        if self.state is None or self.update_prob is None:
            raise RuntimeError("Call initialize() before run().")
        if batch_size <= 0:
            raise ValueError("batch_size must be positive.")
        steps_done = 0
        while steps_done < steps:
            current_batch = min(batch_size, steps - steps_done)
            neurons = self._select_neurons(current_batch)
            self._process_batch(neurons)
            steps_done += current_batch

    def enable_step_logging(self, steps: int):
        """Allocate a (steps, N) buffer of full-state snapshots; 0 disables logging."""
        steps = int(max(0, steps))
        if steps == 0:
            self._step_log_buffer = None
            self._step_log_index = 0
            return
        self._step_log_buffer = np.zeros((steps, self.N), dtype=np.int8)
        self._step_log_index = 0

    def consume_step_log(self) -> np.ndarray:
        """Return the recorded snapshots and reset/disable step logging."""
        if self._step_log_buffer is None:
            return np.zeros((0, self.N), dtype=np.int8)
        filled = min(self._step_log_index, self._step_log_buffer.shape[0])
        data = self._step_log_buffer[:filled].copy()
        self._step_log_buffer = None
        self._step_log_index = 0
        return data

    def enable_diff_logging(self, steps: int):
        """Allocate a diff-log buffer for `steps` asynchronous updates.

        Examples
        --------
        >>> net = BinaryNetwork("log-demo")
        >>> pop = BinaryNeuronPopulation(net, N=1, threshold=0.0, initializer=[0])
        >>> net.add_population(pop)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> net.enable_diff_logging(steps=3)
        >>> net._diff_log_updates.shape
        (3,)
        """
        steps = int(max(0, steps))
        if steps == 0:
            self._diff_log_updates = None
            self._diff_log_deltas = None
            self._diff_log_index = 0
            return
        self._diff_log_updates = np.zeros(steps, dtype=np.uint16)
        self._diff_log_deltas = np.zeros(steps, dtype=np.int8)
        self._diff_log_index = 0

    def consume_diff_log(self) -> tuple[np.ndarray, np.ndarray]:
        """Return the recorded diff log as `(updates, deltas)` row arrays.

        Expected output
        ---------------
        The returned arrays have shape `(1, recorded_steps)` for the current
        simulation backend.
        """
        if self._diff_log_updates is None or self._diff_log_deltas is None:
            return np.zeros((1, 0), dtype=np.uint16), np.zeros((1, 0), dtype=np.int8)
        filled = min(self._diff_log_index, self._diff_log_updates.shape[0])
        updates = self._diff_log_updates[:filled].copy()[None, :]
        deltas = self._diff_log_deltas[:filled].copy()[None, :]
        self._diff_log_updates = None
        self._diff_log_deltas = None
        self._diff_log_index = 0
        return updates, deltas

    @staticmethod
    def reconstruct_states_from_diff_logs(
        initial_state: np.ndarray,
        updates: np.ndarray,
        deltas: np.ndarray,
        *,
        sample_interval: int = 1,
    ) -> np.ndarray:
        """Reconstruct sampled full network states from diff-log traces.

        Examples
        --------
        >>> BinaryNetwork.reconstruct_states_from_diff_logs(
        ...     initial_state=np.array([0, 0], dtype=np.uint8),
        ...     updates=np.array([[0, 1, 0]], dtype=np.uint16),
        ...     deltas=np.array([[1, 1, -1]], dtype=np.int8),
        ... )
        array([[1, 0],
               [1, 1],
               [0, 1]], dtype=uint8)
        """
        update_arr = np.asarray(updates, dtype=np.int64)
        delta_arr = np.asarray(deltas, dtype=np.int8)
        if update_arr.ndim == 1:
            update_arr = update_arr[None, :]
        if delta_arr.ndim == 1:
            delta_arr = delta_arr[None, :]
        state = np.asarray(initial_state, dtype=np.int8).ravel().copy()
        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
            return np.zeros((0, state.size), dtype=np.uint8)
        stride = max(1, int(sample_interval))
        return _reconstruct_states_kernel(state, update_arr, delta_arr, stride)

    def population_rates_from_diff_logs(
        self,
        initial_state: np.ndarray,
        updates: np.ndarray,
        deltas: np.ndarray,
        *,
        sample_interval: int = 1,
        populations: Optional[Sequence[Neuron]] = None,
    ) -> np.ndarray:
        """Compute per-population activity traces directly from diff logs.

        Examples
        --------
        >>> net = BinaryNetwork("rates-demo")
        >>> pop_a = BinaryNeuronPopulation(net, N=1, initializer=[0])
        >>> pop_b = BinaryNeuronPopulation(net, N=1, initializer=[0])
        >>> net.add_population(pop_a)
        <...BinaryNeuronPopulation object...>
        >>> net.add_population(pop_b)
        <...BinaryNeuronPopulation object...>
        >>> net.initialize(weight_mode="dense")
        >>> rates = net.population_rates_from_diff_logs(
        ...     np.array([0, 0], dtype=np.uint8),
        ...     np.array([[0, 1, 0]], dtype=np.uint16),
        ...     np.array([[1, 1, -1]], dtype=np.int8),
        ... )
        >>> rates.tolist()
        [[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
        """
        pops = list(self.population if populations is None else populations)
        if not pops:
            return np.zeros((0, 0), dtype=np.float32)
        update_arr = np.asarray(updates, dtype=np.int64)
        delta_arr = np.asarray(deltas, dtype=np.int8)
        if update_arr.ndim == 1:
            update_arr = update_arr[None, :]
        if delta_arr.ndim == 1:
            delta_arr = delta_arr[None, :]
        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
            return np.zeros((0, len(pops)), dtype=np.float32)
        initial = np.asarray(initial_state, dtype=np.int8).ravel()
        pop_count = len(pops)
        sizes = np.zeros(pop_count, dtype=np.float32)
        cluster_of = np.empty(initial.size, dtype=np.int32)
        cluster_sums = np.zeros(pop_count, dtype=np.int64)
        for idx, pop in enumerate(pops):
            start, end = int(pop.view[0]), int(pop.view[1])
            cluster_of[start:end] = idx
            # Guard against zero-size populations when normalizing rates.
            sizes[idx] = max(1, end - start)
            cluster_sums[idx] = int(initial[start:end].sum())
        stride = max(1, int(sample_interval))
        return _population_rates_kernel(cluster_sums, sizes, cluster_of, update_arr, delta_arr, stride)

    @staticmethod
    def extract_spike_events_from_diff_logs(
        updates: np.ndarray,
        deltas: np.ndarray,
    ) -> tuple[np.ndarray, np.ndarray]:
        """Extract onset events `(times, neuron_ids)` from diff-log traces.

        Examples
        --------
        >>> times, ids = BinaryNetwork.extract_spike_events_from_diff_logs(
        ...     np.array([[0, 1, 0]], dtype=np.uint16),
        ...     np.array([[1, 1, -1]], dtype=np.int8),
        ... )
        >>> times.tolist()
        [0.0, 1.0]
        >>> ids.tolist()
        [0, 1]
        """
        update_arr = np.asarray(updates, dtype=np.int64)
        delta_arr = np.asarray(deltas, dtype=np.int8)
        if update_arr.ndim == 1:
            update_arr = update_arr[None, :]
        if delta_arr.ndim == 1:
            delta_arr = delta_arr[None, :]
        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
        # Only 0 -> 1 transitions count as spike onsets.
        mask = delta_arr > 0
        if not mask.any():
            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
        per_step, steps = update_arr.shape
        times = np.repeat(np.arange(steps, dtype=np.int64), per_step)
        # Column-major flattening keeps all events of one step contiguous.
        flat_updates = update_arr.reshape(-1, order="F")
        flat_mask = mask.reshape(-1, order="F")
        return times[flat_mask].astype(np.float64, copy=False), flat_updates[flat_mask].astype(np.int64, copy=False)

Binary network simulator with dense/sparse backends and diff-log tracing.

Examples
>>> np.random.seed(0)
>>> net = BinaryNetwork("demo")
>>> exc = BinaryNeuronPopulation(net, N=2, threshold=0.5, tau=5.0, initializer=[0, 1])
>>> inh = BinaryNeuronPopulation(net, N=1, threshold=0.5, tau=5.0, initializer=[0])
>>> net.add_population(exc)
<...BinaryNeuronPopulation object...>
>>> net.add_population(inh)
<...BinaryNeuronPopulation object...>
>>> net.add_synapse(AllToAllSynapse(net, exc, exc, j=0.6))
>>> net.initialize(weight_mode="dense", autapse=False)
>>> net.state.shape
(3,)
BinaryNetwork(name='Some Binary Network')
574    def __init__(self, name="Some Binary Network"):
575        self.name = name
576        self.N = 0
577        self.population: List[Neuron] = []
578        self.synapses: List[Synapse] = []
579        self.state: Optional[np.ndarray] = None
580        self.weights_dense: Optional[np.ndarray] = None
581        self.weights_csr = None
582        self.weights_csc = None
583        self.weights = None  # compatibility alias
584        self.LUT = None  # look up table for the update function
585        self.sim_steps = 0
586        self.population_lookup = None
587        self.neuron_lookup = None
588        self.update_prob = None
589        self.thresholds = None
590        self.field = None
591        self.weight_mode = "dense"
592        self.weight_dtype = np.float32
593        self._population_views = np.zeros((0, 2), dtype=np.int64)
594        self._population_cdf = np.zeros(0, dtype=np.float64)
595        self._sparse_rows: List[np.ndarray] = []
596        self._sparse_cols: List[np.ndarray] = []
597        self._sparse_data: List[np.ndarray] = []
598        self._step_log_buffer: Optional[np.ndarray] = None
599        self._step_log_index = 0
600        self._step_log_dummy = np.zeros((0, 0), dtype=np.int8)
601        self._diff_log_updates: Optional[np.ndarray] = None
602        self._diff_log_deltas: Optional[np.ndarray] = None
603        self._diff_log_index = 0
604        self._diff_log_dummy_updates = np.zeros(0, dtype=np.uint16)
605        self._diff_log_dummy_deltas = np.zeros(0, dtype=np.int8)
name
N
population: List[BinaryNetwork.BinaryNetwork.Neuron]
synapses: List[BinaryNetwork.BinaryNetwork.Synapse]
state: Optional[numpy.ndarray]
weights_dense: Optional[numpy.ndarray]
weights_csr
weights_csc
weights
LUT
sim_steps
population_lookup
neuron_lookup
update_prob
thresholds
field
weight_mode
weight_dtype
def add_population(self, population: BinaryNetwork.BinaryNetwork.Neuron):
607    def add_population(self, population: Neuron):
608        self.population.append(population)
609        self.N += population.N
610        return population
def add_synapse(self, synapse: BinaryNetwork.BinaryNetwork.Synapse):
612    def add_synapse(self, synapse: Synapse):
613        self.synapses.append(synapse)
def initialize( self, autapse: bool = False, weight_mode: str = 'auto', ram_budget_gb: float = 12.0, weight_dtype=np.float32):
615    def initialize(
616        self,
617        autapse: bool = False,
618        weight_mode: str = "auto",
619        ram_budget_gb: float = 12.0,
620        weight_dtype=np.float32,
621    ):
622        """Allocate state, sample connectivity, and prepare the cached input field.
623
624        Examples
625        --------
626        >>> np.random.seed(1)
627        >>> net = BinaryNetwork("init-demo")
628        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
629        >>> net.add_population(pop)
630        <...BinaryNeuronPopulation object...>
631        >>> net.initialize(weight_mode="dense")
632        >>> net.field.shape
633        (2,)
634        """
635        if self.N == 0:
636            raise RuntimeError("Cannot initialize network without populations.")
637        self.weight_dtype = np.dtype(weight_dtype)
638        if self.weight_dtype not in (np.float32, np.float64):
639            raise ValueError("weight_dtype must be float32 or float64.")
# Per-neuron flat arrays indexed by global neuron id (0..N-1).
640        self.state = np.zeros(self.N, dtype=np.int8)
641        self.field = np.zeros(self.N, dtype=self.weight_dtype)
642        self.update_prob = np.zeros(self.N, dtype=self.weight_dtype)
643        self.population_lookup = np.zeros(self.N, dtype=np.int32)
644        self.neuron_lookup = np.zeros(self.N, dtype=np.int32)
645        self.thresholds = np.zeros(self.N, dtype=self.weight_dtype)
646        pop_count = len(self.population)
647        pop_views = np.zeros((pop_count, 2), dtype=np.int64)
648        pop_update_mass = np.zeros(pop_count, dtype=np.float64)
649        N_start = 0
# Assign each population a contiguous [start, end) slice of the global index
# space and fill the per-neuron lookup/threshold/update-rate arrays for it.
# max(tau, 1e-9) guards against division by zero for tau == 0.
650        for idx, population in enumerate(self.population):
651            population.set_view([N_start, N_start + population.N])
652            N_start += population.N
653            population.initialze()
654            pop_views[idx, :] = population.view
655            pop_update_mass[idx] = population.N / max(population.tau, 1e-9)
656            self.update_prob[population.view[0]:population.view[1]] = 1.0 / max(population.tau, 1e-9)
657            self.population_lookup[population.view[0]:population.view[1]] = idx
658            self.neuron_lookup[population.view[0]:population.view[1]] = np.arange(
659                population.N, dtype=np.int32
660            )
661            threshold_value = getattr(population, "threshold", 0.0)
662            self.thresholds[population.view[0]:population.view[1]] = float(threshold_value)
663        self.LUT = np.array([population.view for population in self.population])
# Normalize per-neuron update rates into a probability distribution.
664        total = float(self.update_prob.sum())
665        if not math.isfinite(total) or total <= 0:
666            raise RuntimeError("Invalid update probabilities. Check tau values.")
667        self.update_prob /= total
668        pop_mass_total = float(pop_update_mass.sum())
669        if not math.isfinite(pop_mass_total) or pop_mass_total <= 0:
670            raise RuntimeError("Invalid population update masses. Check tau values.")
671        self._population_views = pop_views
# CDF over populations, used for two-stage neuron sampling (population first,
# then neuron within it) — presumably by _select_neurons; defined elsewhere.
672        self._population_cdf = np.cumsum(pop_update_mass / pop_mass_total)
673        self.weight_mode = self._choose_weight_mode(weight_mode, ram_budget_gb)
# Reset any previously built weight storage before (re-)sampling connectivity.
674        self.weights_dense = None
675        self.weights_csr = None
676        self.weights_csc = None
677        self.weights = None
678        self._sparse_rows.clear()
679        self._sparse_cols.clear()
680        self._sparse_data.clear()
681        if self.weight_mode == "dense":
682            self.weights_dense = np.zeros((self.N, self.N), dtype=self.weight_dtype)
683        else:
684            if sp is None:
685                raise ModuleNotFoundError(
686                    "SciPy is required for sparse weight mode. Install it via 'pip install scipy'."
687                )
# Each synapse writes its sampled weights either into weights_dense or into
# the _sparse_rows/_sparse_cols/_sparse_data triplet lists (via initialze()).
688        for synapse in self.synapses:
689            synapse.set_view(
690                np.array(
691                    [[synapse.pre.view[0], synapse.pre.view[1]], [synapse.post.view[0], synapse.post.view[1]]],
692                    dtype=np.int64,
693                )
694            )
695            synapse.initialze()
696        if self.weight_mode == "dense":
# Removing self-connections is a diagonal wipe in dense mode...
697            if not autapse:
698                np.fill_diagonal(self.weights_dense, 0.0)
699            self.weights = self.weights_dense
700        else:
701            row = np.concatenate(self._sparse_rows) if self._sparse_rows else np.zeros(0, dtype=np.int64)
702            col = np.concatenate(self._sparse_cols) if self._sparse_cols else np.zeros(0, dtype=np.int64)
703            data = np.concatenate(self._sparse_data) if self._sparse_data else np.zeros(0, dtype=self.weight_dtype)
# ...and a row != col filter in sparse mode.
704            if not autapse and row.size:
705                keep = row != col
706                row = row[keep]
707                col = col[keep]
708                data = data[keep]
# NOTE: COO-to-CSR/CSC conversion sums duplicate (row, col) entries, so
# multiple synapses targeting the same pair accumulate their weights.
709            matrix = sp.coo_matrix((data, (row, col)), shape=(self.N, self.N), dtype=self.weight_dtype)
710            self.weights_csr = matrix.tocsr()
711            self.weights_csc = matrix.tocsc()
712            self.weights = self.weights_csr
# Prime the cached field h = W @ state and reset the step counter.
713        self._recompute_field()
714        self.sim_steps = 0

Allocate state, sample connectivity, and prepare the cached input field.

Examples
>>> np.random.seed(1)
>>> net = BinaryNetwork("init-demo")
>>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
>>> net.add_population(pop)
<...BinaryNeuronPopulation object...>
>>> net.initialize(weight_mode="dense")
>>> net.field.shape
(2,)
def update(self):
# Perform a single asynchronous update: sample one neuron and process it
# through the same batched path used by run().
846    def update(self):
847        neuron = self._select_neurons(1)
848        self._process_batch(neuron)
def run(self, steps=1000, batch_size=1):
875    def run(self, steps=1000, batch_size=1):
876        """Advance the network for `steps` asynchronous updates.
877
878        Examples
879        --------
880        >>> np.random.seed(2)
881        >>> net = BinaryNetwork("run-demo")
882        >>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
883        >>> net.add_population(pop)
884        <...BinaryNeuronPopulation object...>
885        >>> net.initialize(weight_mode="dense")
886        >>> net.run(steps=4, batch_size=2)
887        >>> int(net.sim_steps)
888        4
889        """
890        if self.state is None or self.update_prob is None:
891            raise RuntimeError("Call initialize() before run().")
892        if batch_size <= 0:
893            raise ValueError("batch_size must be positive.")
894        steps_done = 0
# Process in batches of at most batch_size; the final batch is truncated so
# exactly `steps` updates are performed in total.
895        while steps_done < steps:
896            current_batch = min(batch_size, steps - steps_done)
897            neurons = self._select_neurons(current_batch)
898            self._process_batch(neurons)
899            steps_done += current_batch

Advance the network for steps asynchronous updates.

Examples
>>> np.random.seed(2)
>>> net = BinaryNetwork("run-demo")
>>> pop = BinaryNeuronPopulation(net, N=2, threshold=0.1, initializer=[1, 0])
>>> net.add_population(pop)
<...BinaryNeuronPopulation object...>
>>> net.initialize(weight_mode="dense")
>>> net.run(steps=4, batch_size=2)
>>> int(net.sim_steps)
4
def enable_step_logging(self, steps: int):
# Allocate a (steps, N) int8 buffer that records the full state after each
# update; steps == 0 (or negative) disables logging and frees the buffer.
901    def enable_step_logging(self, steps: int):
902        steps = int(max(0, steps))
903        if steps == 0:
904            self._step_log_buffer = None
905            self._step_log_index = 0
906            return
907        self._step_log_buffer = np.zeros((steps, self.N), dtype=np.int8)
908        self._step_log_index = 0
def consume_step_log(self) -> numpy.ndarray:
# Return the filled portion of the step log and disable logging.
# This is a destructive read: the buffer is released after the copy, so a
# second call returns an empty (0, N) array.
910    def consume_step_log(self) -> np.ndarray:
911        if self._step_log_buffer is None:
912            return np.zeros((0, self.N), dtype=np.int8)
913        filled = min(self._step_log_index, self._step_log_buffer.shape[0])
914        data = self._step_log_buffer[:filled].copy()
915        self._step_log_buffer = None
916        self._step_log_index = 0
917        return data
def enable_diff_logging(self, steps: int):
919    def enable_diff_logging(self, steps: int):
920        """Allocate a diff-log buffer for `steps` asynchronous updates.
921
922        Examples
923        --------
924        >>> net = BinaryNetwork("log-demo")
925        >>> pop = BinaryNeuronPopulation(net, N=1, threshold=0.0, initializer=[0])
926        >>> net.add_population(pop)
927        <...BinaryNeuronPopulation object...>
928        >>> net.initialize(weight_mode="dense")
929        >>> net.enable_diff_logging(steps=3)
930        >>> net._diff_log_updates.shape
931        (3,)
932        """
933        steps = int(max(0, steps))
934        if steps == 0:
935            self._diff_log_updates = None
936            self._diff_log_deltas = None
937            self._diff_log_index = 0
938            return
# NOTE(review): uint16 neuron indices cap the loggable network size at
# 65536 neurons — confirm N stays below this limit for intended use.
939        self._diff_log_updates = np.zeros(steps, dtype=np.uint16)
940        self._diff_log_deltas = np.zeros(steps, dtype=np.int8)
941        self._diff_log_index = 0

Allocate a diff-log buffer for steps asynchronous updates.

Examples
>>> net = BinaryNetwork("log-demo")
>>> pop = BinaryNeuronPopulation(net, N=1, threshold=0.0, initializer=[0])
>>> net.add_population(pop)
<...BinaryNeuronPopulation object...>
>>> net.initialize(weight_mode="dense")
>>> net.enable_diff_logging(steps=3)
>>> net._diff_log_updates.shape
(3,)
def consume_diff_log(self) -> tuple[numpy.ndarray, numpy.ndarray]:
943    def consume_diff_log(self) -> tuple[np.ndarray, np.ndarray]:
944        """Return the recorded diff log as `(updates, deltas)` row arrays.
945
946        Expected output
947        ---------------
948        The returned arrays have shape `(1, recorded_steps)` for the current
949        simulation backend.
950        """
# When logging was never enabled, return empty row arrays of matching dtypes.
951        if self._diff_log_updates is None or self._diff_log_deltas is None:
952            return np.zeros((1, 0), dtype=np.uint16), np.zeros((1, 0), dtype=np.int8)
953        filled = min(self._diff_log_index, self._diff_log_updates.shape[0])
# Copy only the filled prefix and add a leading row axis ([None, :]).
# Destructive read: buffers are released so the log is consumed exactly once.
954        updates = self._diff_log_updates[:filled].copy()[None, :]
955        deltas = self._diff_log_deltas[:filled].copy()[None, :]
956        self._diff_log_updates = None
957        self._diff_log_deltas = None
958        self._diff_log_index = 0
959        return updates, deltas

Return the recorded diff log as (updates, deltas) row arrays.

Expected output

The returned arrays have shape (1, recorded_steps) for the current simulation backend.

@staticmethod
def reconstruct_states_from_diff_logs( initial_state: numpy.ndarray, updates: numpy.ndarray, deltas: numpy.ndarray, *, sample_interval: int = 1) -> numpy.ndarray:
961    @staticmethod
962    def reconstruct_states_from_diff_logs(
963        initial_state: np.ndarray,
964        updates: np.ndarray,
965        deltas: np.ndarray,
966        *,
967        sample_interval: int = 1,
968    ) -> np.ndarray:
969        """Reconstruct sampled full network states from diff-log traces.
970
971        Examples
972        --------
973        >>> BinaryNetwork.reconstruct_states_from_diff_logs(
974        ...     initial_state=np.array([0, 0], dtype=np.uint8),
975        ...     updates=np.array([[0, 1, 0]], dtype=np.uint16),
976        ...     deltas=np.array([[1, 1, -1]], dtype=np.int8),
977        ... )
978        array([[1, 0],
979               [1, 1],
980               [0, 1]], dtype=uint8)
981        """
# Promote 1-D traces to the canonical (rows, steps) layout used by the kernel.
982        update_arr = np.asarray(updates, dtype=np.int64)
983        delta_arr = np.asarray(deltas, dtype=np.int8)
984        if update_arr.ndim == 1:
985            update_arr = update_arr[None, :]
986        if delta_arr.ndim == 1:
987            delta_arr = delta_arr[None, :]
988        state = np.asarray(initial_state, dtype=np.int8).ravel().copy()
# Mismatched update/delta shapes are treated as "nothing to reconstruct"
# rather than raising — callers get an empty (0, N) result.
989        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
990            return np.zeros((0, state.size), dtype=np.uint8)
991        stride = max(1, int(sample_interval))
# Heavy lifting happens in the numba kernel defined earlier in this module.
992        return _reconstruct_states_kernel(state, update_arr, delta_arr, stride)

Reconstruct sampled full network states from diff-log traces.

Examples
>>> BinaryNetwork.reconstruct_states_from_diff_logs(
...     initial_state=np.array([0, 0], dtype=np.uint8),
...     updates=np.array([[0, 1, 0]], dtype=np.uint16),
...     deltas=np.array([[1, 1, -1]], dtype=np.int8),
... )
array([[1, 0],
       [1, 1],
       [0, 1]], dtype=uint8)
def population_rates_from_diff_logs( self, initial_state: numpy.ndarray, updates: numpy.ndarray, deltas: numpy.ndarray, *, sample_interval: int = 1, populations: Optional[Sequence[BinaryNetwork.BinaryNetwork.Neuron]] = None) -> numpy.ndarray:
 994    def population_rates_from_diff_logs(
 995        self,
 996        initial_state: np.ndarray,
 997        updates: np.ndarray,
 998        deltas: np.ndarray,
 999        *,
1000        sample_interval: int = 1,
1001        populations: Optional[Sequence[Neuron]] = None,
1002    ) -> np.ndarray:
1003        """Compute per-population activity traces directly from diff logs.
1004
1005        Examples
1006        --------
1007        >>> net = BinaryNetwork("rates-demo")
1008        >>> pop_a = BinaryNeuronPopulation(net, N=1, initializer=[0])
1009        >>> pop_b = BinaryNeuronPopulation(net, N=1, initializer=[0])
1010        >>> net.add_population(pop_a)
1011        <...BinaryNeuronPopulation object...>
1012        >>> net.add_population(pop_b)
1013        <...BinaryNeuronPopulation object...>
1014        >>> net.initialize(weight_mode="dense")
1015        >>> rates = net.population_rates_from_diff_logs(
1016        ...     np.array([0, 0], dtype=np.uint8),
1017        ...     np.array([[0, 1, 0]], dtype=np.uint16),
1018        ...     np.array([[1, 1, -1]], dtype=np.int8),
1019        ... )
1020        >>> rates.tolist()
1021        [[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
1022        """
# Default to all populations of this network; an explicit subset is allowed.
1023        pops = list(self.population if populations is None else populations)
1024        if not pops:
1025            return np.zeros((0, 0), dtype=np.float32)
# Same shape normalization and lenient validation as the state reconstructor.
1026        update_arr = np.asarray(updates, dtype=np.int64)
1027        delta_arr = np.asarray(deltas, dtype=np.int8)
1028        if update_arr.ndim == 1:
1029            update_arr = update_arr[None, :]
1030        if delta_arr.ndim == 1:
1031            delta_arr = delta_arr[None, :]
1032        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
1033            return np.zeros((0, len(pops)), dtype=np.float32)
1034        initial = np.asarray(initial_state, dtype=np.int8).ravel()
1035        pop_count = len(pops)
1036        sizes = np.zeros(pop_count, dtype=np.float32)
# cluster_of maps each global neuron index to its population slot.
# NOTE(review): np.empty leaves indices outside every pop's view
# uninitialized — assumes the views of `pops` cover the whole state; confirm.
1037        cluster_of = np.empty(initial.size, dtype=np.int32)
1038        cluster_sums = np.zeros(pop_count, dtype=np.int64)
1039        for idx, pop in enumerate(pops):
1040            start, end = int(pop.view[0]), int(pop.view[1])
1041            cluster_of[start:end] = idx
# max(1, ...) prevents division by zero for an empty population slice.
1042            sizes[idx] = max(1, end - start)
1043            cluster_sums[idx] = int(initial[start:end].sum())
1044        stride = max(1, int(sample_interval))
1045        return _population_rates_kernel(cluster_sums, sizes, cluster_of, update_arr, delta_arr, stride)

Compute per-population activity traces directly from diff logs.

Examples
>>> net = BinaryNetwork("rates-demo")
>>> pop_a = BinaryNeuronPopulation(net, N=1, initializer=[0])
>>> pop_b = BinaryNeuronPopulation(net, N=1, initializer=[0])
>>> net.add_population(pop_a)
<...BinaryNeuronPopulation object...>
>>> net.add_population(pop_b)
<...BinaryNeuronPopulation object...>
>>> net.initialize(weight_mode="dense")
>>> rates = net.population_rates_from_diff_logs(
...     np.array([0, 0], dtype=np.uint8),
...     np.array([[0, 1, 0]], dtype=np.uint16),
...     np.array([[1, 1, -1]], dtype=np.int8),
... )
>>> rates.tolist()
[[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
@staticmethod
def extract_spike_events_from_diff_logs( updates: numpy.ndarray, deltas: numpy.ndarray) -> tuple[numpy.ndarray, numpy.ndarray]:
1047    @staticmethod
1048    def extract_spike_events_from_diff_logs(
1049        updates: np.ndarray,
1050        deltas: np.ndarray,
1051    ) -> tuple[np.ndarray, np.ndarray]:
1052        """Extract onset events `(times, neuron_ids)` from diff-log traces.
1053
1054        Examples
1055        --------
1056        >>> times, ids = BinaryNetwork.extract_spike_events_from_diff_logs(
1057        ...     np.array([[0, 1, 0]], dtype=np.uint16),
1058        ...     np.array([[1, 1, -1]], dtype=np.int8),
1059        ... )
1060        >>> times.tolist()
1061        [0.0, 1.0]
1062        >>> ids.tolist()
1063        [0, 1]
1064        """
1065        update_arr = np.asarray(updates, dtype=np.int64)
1066        delta_arr = np.asarray(deltas, dtype=np.int8)
1067        if update_arr.ndim == 1:
1068            update_arr = update_arr[None, :]
1069        if delta_arr.ndim == 1:
1070            delta_arr = delta_arr[None, :]
1071        if update_arr.ndim != 2 or delta_arr.shape != update_arr.shape:
1072            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
# Only positive deltas (0 -> 1 transitions) count as spike onsets.
1073        mask = delta_arr > 0
1074        if not mask.any():
1075            return np.zeros(0, dtype=np.float64), np.zeros(0, dtype=np.int64)
# Arrays are (per_step, steps): each column holds the updates of one step.
# Column-major (order="F") flattening therefore groups entries by step, so
# the repeated step indices in `times` line up with the flattened updates.
1076        per_step, steps = update_arr.shape
1077        times = np.repeat(np.arange(steps, dtype=np.int64), per_step)
1078        flat_updates = update_arr.reshape(-1, order="F")
1079        flat_mask = mask.reshape(-1, order="F")
1080        return times[flat_mask].astype(np.float64, copy=False), flat_updates[flat_mask].astype(np.int64, copy=False)

Extract onset events (times, neuron_ids) from diff-log traces.

Examples
>>> times, ids = BinaryNetwork.extract_spike_events_from_diff_logs(
...     np.array([[0, 1, 0]], dtype=np.uint16),
...     np.array([[1, 1, -1]], dtype=np.int8),
... )
>>> times.tolist()
[0.0, 1.0]
>>> ids.tolist()
[0, 1]
def warm_numba_caches() -> None:
186def warm_numba_caches() -> None:
187    """Compile and cache the numba kernels once per process."""
188    global _NUMBA_WARMED
189    if _NUMBA_WARMED:
190        return
# Minimal dummy inputs — just enough to trigger JIT compilation of each
# kernel signature; the computed results are discarded.
191    neurons = np.array([0], dtype=np.int64)
192    state = np.zeros(2, dtype=np.int8)
193    field = np.zeros(2, dtype=np.float32)
194    thresholds = np.zeros(2, dtype=np.float32)
195    weights = np.zeros((2, 2), dtype=np.float32)
196    log_states = np.zeros((1, 2), dtype=np.int8)
197    update_log = np.zeros(1, dtype=np.uint16)
198    delta_log = np.zeros(1, dtype=np.int8)
# state/field are copied so the warm-up writes do not leak into the
# arrays reused for the second kernel call below.
199    _dense_batch_kernel(
200        neurons,
201        state.copy(),
202        field.copy(),
203        thresholds,
204        weights,
205        log_states,
206        False,
207        0,
208        update_log,
209        delta_log,
210        False,
211        0,
212    )
213    _sparse_batch_kernel(
214        neurons,
215        state.copy(),
216        field.copy(),
217        thresholds,
218        np.zeros(0, dtype=np.float32),
219        np.zeros(0, dtype=np.int32),
220        np.array([0, 0], dtype=np.int32),
221        log_states,
222        False,
223        0,
224        update_log,
225        delta_log,
226        False,
227        0,
228    )
229    _reconstruct_states_kernel(
230        np.zeros(2, dtype=np.int8),
231        np.zeros((1, 1), dtype=np.int64),
232        np.zeros((1, 1), dtype=np.int8),
233        1,
234    )
235    _population_rates_kernel(
236        np.zeros(1, dtype=np.int64),
237        np.ones(1, dtype=np.float32),
238        np.zeros(2, dtype=np.int32),
239        np.zeros((1, 1), dtype=np.int64),
240        np.zeros((1, 1), dtype=np.int8),
241        1,
242    )
# Flag is set only after all four kernels compiled, so a failure partway
# through lets a later call retry the warm-up.
243    _NUMBA_WARMED = True

Compile and cache the numba kernels once per process.