spiketools.conversion
Conversion utilities for the canonical spiketimes representation.
The package-wide spike format is a float array of shape (2, n_spikes):
- row 0: spike times in milliseconds
- row 1: trial, unit, or neuron indices
Examples
Shared example setup used throughout the documentation:
import numpy as np
from spiketools import gamma_spikes, spiketimes_to_binary, spiketimes_to_list
np.random.seed(0)
rates = np.array([6.0] * 10 + [5.6, 6.3, 5.9, 6.5, 5.8, 6.1, 5.7, 6.4, 6.0, 5.5], dtype=float)
orders = np.array([0.2] * 10 + [1.0, 2.0, 2.0, 3.0, 1.0, 2.0, 3.0, 2.0, 1.0, 3.0], dtype=float)
spiketimes = gamma_spikes(rates=rates, order=orders, tlim=[0.0, 5000.0], dt=1.0)
binary, time = spiketimes_to_binary(spiketimes, tlim=[0.0, 5000.0], dt=50.0)
trains = spiketimes_to_list(spiketimes)
"""Conversion utilities for the canonical `spiketimes` representation.

The package-wide spike format is a `float` array of shape `(2, n_spikes)`:

- row `0`: spike times in milliseconds
- row `1`: trial, unit, or neuron indices

Examples
--------
Shared example setup used throughout the documentation:

```python
import numpy as np

from spiketools import gamma_spikes, spiketimes_to_binary, spiketimes_to_list

np.random.seed(0)
rates = np.array([6.0] * 10 + [5.6, 6.3, 5.9, 6.5, 5.8, 6.1, 5.7, 6.4, 6.0, 5.5], dtype=float)
orders = np.array([0.2] * 10 + [1.0, 2.0, 2.0, 3.0, 1.0, 2.0, 3.0, 2.0, 1.0, 3.0], dtype=float)
spiketimes = gamma_spikes(rates=rates, order=orders, tlim=[0.0, 5000.0], dt=1.0)

binary, time = spiketimes_to_binary(spiketimes, tlim=[0.0, 5000.0], dt=50.0)
trains = spiketimes_to_list(spiketimes)
```
"""

from __future__ import annotations

from bisect import bisect_right
from typing import Iterable, Optional, Sequence

import numpy as np

__all__ = [
    "binary_to_spiketimes",
    "cut_spiketimes",
    "get_time_limits",
    "spiketimes_to_binary",
    "spiketimes_to_list",
]


def get_time_limits(spiketimes: np.ndarray) -> list[float]:
    """
    Infer time limits from a `spiketimes` array.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation with shape `(2, n_spikes)`.

    Returns
    -------
    list[float]
        `[tmin, tmax]` with inclusive start and exclusive end. If inference
        fails (e.g. empty input or all-nan times), `[0.0, 1.0]` is returned
        as a safe fallback.

    Examples
    --------
    >>> spikes = np.array([[5.0, 8.0], [0.0, 1.0]])
    >>> get_time_limits(spikes)
    [5.0, 9.0]
    """
    try:
        # nanmin/nanmax ignore [nan, trial] placeholder columns.
        tlim = [
            float(np.nanmin(spiketimes[0, :])),
            float(np.nanmax(spiketimes[0, :]) + 1),
        ]
    except Exception:
        # Best-effort fallback, e.g. for empty arrays where nanmin raises.
        tlim = [0.0, 1.0]
    if np.isnan(tlim).any():
        tlim = [0.0, 1.0]
    return tlim


def spiketimes_to_binary(
    spiketimes: np.ndarray,
    tlim: Optional[Sequence[float]] = None,
    dt: float = 1.0,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Convert `spiketimes` into a spike-count matrix on a regular time grid.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation. Row `0` stores times in ms, row `1`
        stores trial or unit indices.
    tlim:
        Two-element sequence defining `[tmin, tmax]` in ms. Defaults to
        inferred bounds.
    dt:
        Temporal resolution in ms.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        `(binary, time)` where `binary.shape == (n_trials, n_bins)` and
        `time` contains the left bin edges in ms.

    Notes
    -----
    The output is not strictly binary if multiple spikes land in the same bin.
    In that case the matrix stores spike counts per bin.

    Examples
    --------
    >>> spikes = np.array([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
    >>> binary, time = spiketimes_to_binary(spikes, tlim=[0.0, 3.0], dt=1.0)
    >>> binary.astype(int).tolist()
    [[1, 1, 0], [0, 1, 0]]
    >>> time.tolist()
    [-0.5, 0.5, 1.5]
    """
    if tlim is None:
        tlim = get_time_limits(spiketimes)

    # Bin edges; one extra edge is generated so the last bin closes below tlim[1].
    time = np.arange(tlim[0], tlim[1] + dt, dt).astype(float)
    if dt <= 1:
        # Center fine-resolution bins on the nominal sample times.
        time -= 0.5 * float(dt)

    # Trial bin edges at half-integers so each integer index gets its own bin.
    trials = np.array([-1] + list(range(int(spiketimes[1, :].max() + 1)))) + 0.5

    tlim_spikes = cut_spiketimes(spiketimes, tlim)
    # Drop [nan, trial] placeholder columns before histogramming.
    tlim_spikes = tlim_spikes[:, ~np.isnan(tlim_spikes[0, :])]

    if tlim_spikes.shape[1] > 0:
        binary = np.histogram2d(
            tlim_spikes[0, :], tlim_spikes[1, :], [time, trials]
        )[0].T
    else:
        binary = np.zeros((len(trials) - 1, len(time) - 1))
    return binary, time[:-1]


def binary_to_spiketimes(binary: np.ndarray, time: Iterable[float]) -> np.ndarray:
    """
    Convert a binned spike matrix into canonical `spiketimes`.

    Parameters
    ----------
    binary:
        Array with shape `(n_trials, n_bins)`. Values larger than `1` are
        interpreted as multiple spikes in the same bin.
    time:
        One time value per bin, typically the left bin edges in ms.

    Returns
    -------
    np.ndarray
        `float` array with shape `(2, n_spikes_or_markers)`. Spikes from
        higher-count bins are emitted first; the output is not sorted by time.

    Notes
    -----
    Trials with no spikes are retained via placeholder columns
    `[nan, trial_id]`.

    This function assumes that each non-zero entry denotes one or more spike
    events in that time bin. It is not intended for persistent state matrices
    where a neuron stays at `1` across consecutive bins until the next update.

    Examples
    --------
    Convert a spike-count raster on a regular grid (the count-`2` bin is
    peeled off first, so its time appears before the count-`1` bins):

    >>> binary_to_spiketimes(
    ...     np.array([[1, 0, 2], [0, 0, 0]]),
    ...     [0.0, 1.0, 2.0],
    ... ).tolist()
    [[2.0, 0.0, 2.0, nan], [0.0, 0.0, 0.0, 1.0]]
    """
    time = np.asarray(time)
    spiketimes: list[list[float]] = [[], []]
    max_count = binary.max()
    spikes = binary.copy()
    # Peel off the highest remaining count each pass so a bin with count k
    # contributes k separate spike columns.
    while max_count > 0:
        if max_count == 1:
            trial, t_index = spikes.nonzero()
        else:
            trial, t_index = np.where(spikes == max_count)
        spiketimes[0] += time[t_index].tolist()
        spiketimes[1] += trial.tolist()
        spikes[spikes == max_count] -= 1
        max_count -= 1
    spiketimes_array = np.array(spiketimes)

    # Keep silent trials visible via [nan, trial] marker columns.
    all_trials = set(range(binary.shape[0]))
    found_trials = set(spiketimes_array[1, :])
    for mt in sorted(all_trials.difference(found_trials)):
        spiketimes_array = np.append(
            spiketimes_array, np.array([[np.nan], [mt]]), axis=1
        )
    return spiketimes_array.astype(float)


def spiketimes_to_list(spiketimes: np.ndarray) -> list[np.ndarray]:
    """
    Convert canonical `spiketimes` into one array per trial or unit.

    Parameters
    ----------
    spiketimes:
        Spike array with times in row `0` and indices in row `1`.

    Returns
    -------
    list[np.ndarray]
        `result[i]` contains the spike times of trial or unit `i`. Trials
        without spikes map to empty arrays.

    Examples
    --------
    >>> spiketimes_to_list(np.array([[1.0, 2.0, 4.0], [0.0, 1.0, 1.0]]))
    [array([1.]), array([2., 4.])]
    """
    if spiketimes.shape[1] == 0:
        return []
    n_trials = int(spiketimes[1, :].max() + 1)
    # Drop [nan, trial] placeholder columns.
    ordered = spiketimes[:, ~np.isnan(spiketimes[0])]
    # Stable sort by time, then by trial: spikes end up grouped by trial with
    # ascending times inside each group.
    ordered = ordered[:, np.argsort(ordered[0], kind="mergesort")]
    ordered = ordered[:, np.argsort(ordered[1], kind="mergesort")]

    spikelist: list[np.ndarray] = []
    start = 0
    for trial in range(n_trials):
        # bisect works because the trial row is sorted ascending.
        end = bisect_right(ordered[1], trial)
        spikelist.append(ordered[0, start:end])
        start = end
    return spikelist


def cut_spiketimes(spiketimes: np.ndarray, tlim: Sequence[float]) -> np.ndarray:
    """
    Restrict `spiketimes` to a time interval.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation.
    tlim:
        Two-element sequence `[tmin, tmax]` in ms. The interval is inclusive at
        the start and exclusive at the end.

    Returns
    -------
    np.ndarray
        Cropped spike array. Empty trials are still represented by
        `[nan, trial_id]` markers, even when no spike survives the cut.

    Examples
    --------
    >>> cut_spiketimes(
    ...     np.array([[1.0, 3.0, 5.0], [0.0, 0.0, 1.0]]),
    ...     [2.0, 5.0],
    ... ).tolist()
    [[3.0, nan], [0.0, 1.0]]
    """
    alltrials = set(spiketimes[1, :])
    # Drop nan placeholder columns before cutting; they are re-added below.
    cut_spikes = spiketimes[:, np.isfinite(spiketimes[0])]
    keep = (cut_spikes[0, :] >= tlim[0]) & (cut_spikes[0, :] < tlim[1])
    cut_spikes = cut_spikes[:, keep]
    # Re-insert [nan, trial] markers for trials that lost all their spikes.
    for trial in sorted(alltrials):
        if trial not in cut_spikes[1, :]:
            cut_spikes = np.append(
                cut_spikes, np.array([[np.nan], [trial]]), axis=1
            )
    return cut_spikes


# Backwards-compatible alias for the old private name.
_get_tlim = get_time_limits
def binary_to_spiketimes(binary: np.ndarray, time: Iterable[float]) -> np.ndarray:
    """
    Convert a binned spike matrix into canonical `spiketimes`.

    Parameters
    ----------
    binary:
        Array with shape `(n_trials, n_bins)`. Values larger than `1` are
        interpreted as multiple spikes in the same bin.
    time:
        One time value per bin, typically the left bin edges in ms.

    Returns
    -------
    np.ndarray
        `float` array with shape `(2, n_spikes_or_markers)`. Spikes from
        higher-count bins are emitted first; the output is not sorted by time.

    Notes
    -----
    Trials with no spikes are retained via placeholder columns
    `[nan, trial_id]`.

    This function assumes that each non-zero entry denotes one or more spike
    events in that time bin. It is not intended for persistent state matrices
    where a neuron stays at `1` across consecutive bins until the next update.

    Examples
    --------
    Convert a spike-count raster on a regular grid (the count-`2` bin is
    peeled off first, so its time appears before the count-`1` bins):

    >>> binary_to_spiketimes(
    ...     np.array([[1, 0, 2], [0, 0, 0]]),
    ...     [0.0, 1.0, 2.0],
    ... ).tolist()
    [[2.0, 0.0, 2.0, nan], [0.0, 0.0, 0.0, 1.0]]
    """
    time = np.asarray(time)
    spiketimes: list[list[float]] = [[], []]
    max_count = binary.max()
    spikes = binary.copy()
    # Peel off the highest remaining count each pass so a bin with count k
    # contributes k separate spike columns.
    while max_count > 0:
        if max_count == 1:
            trial, t_index = spikes.nonzero()
        else:
            trial, t_index = np.where(spikes == max_count)
        spiketimes[0] += time[t_index].tolist()
        spiketimes[1] += trial.tolist()
        spikes[spikes == max_count] -= 1
        max_count -= 1
    spiketimes_array = np.array(spiketimes)

    # Keep silent trials visible via [nan, trial] marker columns.
    all_trials = set(range(binary.shape[0]))
    found_trials = set(spiketimes_array[1, :])
    for mt in sorted(all_trials.difference(found_trials)):
        spiketimes_array = np.append(
            spiketimes_array, np.array([[np.nan], [mt]]), axis=1
        )
    return spiketimes_array.astype(float)
Convert a binned spike matrix into canonical spiketimes.
Parameters
- binary: Array with shape (n_trials, n_bins). Values larger than 1 are interpreted as multiple spikes in the same bin.
- time: One time value per bin, typically the left bin edges in ms.
Returns
- np.ndarray:
float array with shape (2, n_spikes_or_markers).
Notes
Trials with no spikes are retained via placeholder columns
[nan, trial_id].
This function assumes that each non-zero entry denotes one or more spike
events in that time bin. It is not intended for persistent state matrices
where a neuron stays at 1 across consecutive bins until the next update.
Examples
Convert a spike-count raster on a regular grid:
>>> binary_to_spiketimes(
... np.array([[1, 0, 2], [0, 0, 0]]),
... [0.0, 1.0, 2.0],
... ).tolist()
[[2.0, 0.0, 2.0, nan], [0.0, 0.0, 0.0, 1.0]]
def cut_spiketimes(spiketimes: np.ndarray, tlim: Sequence[float]) -> np.ndarray:
    """
    Restrict `spiketimes` to a time interval.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation.
    tlim:
        Two-element sequence `[tmin, tmax]` in ms. The interval is inclusive at
        the start and exclusive at the end.

    Returns
    -------
    np.ndarray
        Cropped spike array. Empty trials are still represented by
        `[nan, trial_id]` markers, even when no spike survives the cut.

    Examples
    --------
    >>> cut_spiketimes(
    ...     np.array([[1.0, 3.0, 5.0], [0.0, 0.0, 1.0]]),
    ...     [2.0, 5.0],
    ... ).tolist()
    [[3.0, nan], [0.0, 1.0]]
    """
    alltrials = set(spiketimes[1, :])
    # Drop nan placeholder columns before cutting; they are re-added below.
    cut_spikes = spiketimes[:, np.isfinite(spiketimes[0])]
    keep = (cut_spikes[0, :] >= tlim[0]) & (cut_spikes[0, :] < tlim[1])
    cut_spikes = cut_spikes[:, keep]
    # Re-insert [nan, trial] markers for trials that lost all their spikes.
    # This runs unconditionally so an all-filtered result still carries one
    # marker per trial, as documented.
    for trial in sorted(alltrials):
        if trial not in cut_spikes[1, :]:
            cut_spikes = np.append(
                cut_spikes, np.array([[np.nan], [trial]]), axis=1
            )
    return cut_spikes
Restrict spiketimes to a time interval.
Parameters
- spiketimes: Canonical spike representation.
- tlim: Two-element sequence [tmin, tmax] in ms. The interval is inclusive at the start and exclusive at the end.
Returns
- np.ndarray: Cropped spike array. Empty trials are still represented by [nan, trial_id] markers.
Examples
>>> cut_spiketimes(
... np.array([[1.0, 3.0, 5.0], [0.0, 0.0, 1.0]]),
... [2.0, 5.0],
... ).tolist()
[[3.0, nan], [0.0, 1.0]]
def get_time_limits(spiketimes: np.ndarray) -> list[float]:
    """
    Infer time limits from a `spiketimes` array.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation with shape `(2, n_spikes)`.

    Returns
    -------
    list[float]
        `[tmin, tmax]` with inclusive start and exclusive end. If inference
        fails (e.g. empty input or all-nan times), `[0.0, 1.0]` is returned
        as a safe fallback.

    Examples
    --------
    >>> spikes = np.array([[5.0, 8.0], [0.0, 1.0]])
    >>> get_time_limits(spikes)
    [5.0, 9.0]
    """
    try:
        # nanmin/nanmax ignore [nan, trial] placeholder columns.
        tlim = [
            float(np.nanmin(spiketimes[0, :])),
            float(np.nanmax(spiketimes[0, :]) + 1),
        ]
    except Exception:
        # Deliberate best-effort fallback: nanmin raises on empty input.
        tlim = [0.0, 1.0]
    if np.isnan(tlim).any():
        # All-nan times: nanmin/nanmax return nan instead of raising.
        tlim = [0.0, 1.0]
    return tlim
Infer time limits from a spiketimes array.
Parameters
- spiketimes: Canonical spike representation with shape (2, n_spikes).
Returns
- list[float]:
[tmin, tmax] with inclusive start and exclusive end. If inference fails, [0.0, 1.0] is returned as a safe fallback.
Examples
>>> spikes = np.array([[5.0, 8.0], [0.0, 1.0]])
>>> get_time_limits(spikes)
[5.0, 9.0]
def spiketimes_to_binary(
    spiketimes: np.ndarray,
    tlim: Optional[Sequence[float]] = None,
    dt: float = 1.0,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Convert `spiketimes` into a spike-count matrix on a regular time grid.

    Parameters
    ----------
    spiketimes:
        Canonical spike representation. Row `0` stores times in ms, row `1`
        stores trial or unit indices.
    tlim:
        Two-element sequence defining `[tmin, tmax]` in ms. Defaults to
        inferred bounds.
    dt:
        Temporal resolution in ms.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        `(binary, time)` where `binary.shape == (n_trials, n_bins)` and
        `time` contains the left bin edges in ms.

    Notes
    -----
    The output is not strictly binary if multiple spikes land in the same bin.
    In that case the matrix stores spike counts per bin.

    Examples
    --------
    >>> spikes = np.array([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
    >>> binary, time = spiketimes_to_binary(spikes, tlim=[0.0, 3.0], dt=1.0)
    >>> binary.astype(int).tolist()
    [[1, 1, 0], [0, 1, 0]]
    >>> time.tolist()
    [-0.5, 0.5, 1.5]
    """
    if tlim is None:
        tlim = get_time_limits(spiketimes)

    # Bin edges; one extra edge is generated so the last bin closes below tlim[1].
    time = np.arange(tlim[0], tlim[1] + dt, dt).astype(float)
    if dt <= 1:
        # Center fine-resolution bins on the nominal sample times.
        time -= 0.5 * float(dt)

    # Trial bin edges at half-integers so each integer index gets its own bin.
    trials = np.array([-1] + list(range(int(spiketimes[1, :].max() + 1)))) + 0.5

    tlim_spikes = cut_spiketimes(spiketimes, tlim)
    # Drop [nan, trial] placeholder columns before histogramming.
    tlim_spikes = tlim_spikes[:, ~np.isnan(tlim_spikes[0, :])]

    if tlim_spikes.shape[1] > 0:
        binary = np.histogram2d(
            tlim_spikes[0, :], tlim_spikes[1, :], [time, trials]
        )[0].T
    else:
        binary = np.zeros((len(trials) - 1, len(time) - 1))
    return binary, time[:-1]
Convert spiketimes into a spike-count matrix on a regular time grid.
Parameters
- spiketimes: Canonical spike representation. Row 0 stores times in ms, row 1 stores trial or unit indices.
- tlim: Two-element sequence defining [tmin, tmax] in ms. Defaults to inferred bounds.
- dt:: Temporal resolution in ms.
Returns
- tuple[np.ndarray, np.ndarray]:
(binary, time) where binary.shape == (n_trials, n_bins) and time contains the left bin edges in ms.
Notes
The output is not strictly binary if multiple spikes land in the same bin. In that case the matrix stores spike counts per bin.
Examples
>>> spikes = np.array([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
>>> binary, time = spiketimes_to_binary(spikes, tlim=[0.0, 3.0], dt=1.0)
>>> binary.astype(int).tolist()
[[1, 1, 0], [0, 1, 0]]
>>> time.tolist()
[-0.5, 0.5, 1.5]
def spiketimes_to_list(spiketimes: np.ndarray) -> list[np.ndarray]:
    """
    Convert canonical `spiketimes` into one array per trial or unit.

    Parameters
    ----------
    spiketimes:
        Spike array with times in row `0` and indices in row `1`.

    Returns
    -------
    list[np.ndarray]
        `result[i]` contains the spike times of trial or unit `i`. Trials
        without spikes map to empty arrays.

    Examples
    --------
    >>> spiketimes_to_list(np.array([[1.0, 2.0, 4.0], [0.0, 1.0, 1.0]]))
    [array([1.]), array([2., 4.])]
    """
    if spiketimes.shape[1] == 0:
        return []
    n_trials = int(spiketimes[1, :].max() + 1)
    # Drop [nan, trial] placeholder columns.
    ordered = spiketimes[:, ~np.isnan(spiketimes[0])]
    # Stable sort by time, then by trial: spikes end up grouped by trial with
    # ascending times inside each group.
    ordered = ordered[:, np.argsort(ordered[0], kind="mergesort")]
    ordered = ordered[:, np.argsort(ordered[1], kind="mergesort")]

    spikelist: list[np.ndarray] = []
    start = 0
    for trial in range(n_trials):
        # bisect works because the trial row is sorted ascending; trials with
        # no spikes yield an empty slice.
        end = bisect_right(ordered[1], trial)
        spikelist.append(ordered[0, start:end])
        start = end
    return spikelist
Convert canonical spiketimes into one array per trial or unit.
Parameters
- spiketimes: Spike array with times in row 0 and indices in row 1.
Returns
- list[np.ndarray]:
result[i] contains the spike times of trial or unit i.
Examples
>>> spiketimes_to_list(np.array([[1.0, 2.0, 4.0], [0.0, 1.0, 1.0]]))
[array([1.]), array([2., 4.])]