@rth please try this.
Note: This approach writes the recorded data to the hard disk after every chunk. If you're dealing with gigantic data or have a very old hard drive, these writes may become the bottleneck of your whole simulation!
from brian2 import *
import numpy as np
start_scope()
# Spike threshold and post-spike reset for the dimensionless membrane variable v.
thr = -10
v_reset = -60
# let's make some populations
nPops = 3
pops = []
for n in range(nPops):
    # Each population: 5 leaky neurons driven by a constant per-neuron input I.
    pop = NeuronGroup(
        N = 5,
        model= """
dv/dt = (-v + I)/tau : 1
I: 1
tau: second
""",
        threshold="v>thr",
        reset="v=v_reset",
        method="euler",
    )
    # NOTE(review): abs(np.random.normal(10, 3)) draws a single scalar, so all
    # neurons WITHIN a population share one tau (tau differs only across
    # populations) — confirm a per-neuron draw wasn't intended.
    pop.tau[:] = abs(np.random.normal(10, 3))*ms
    pop.v[:] = np.random.normal(-75, 3)
    # Input current grows with the neuron index i (1, 2, ..., N).
    pop.I[:] = 1*(pop.i+1)
    pops.append(pop)
# and connect them all together with synapses
syns = []
for l in range(nPops):
    for m in range(nPops):
        # One Synapses object per (source, target) pair, including l == m
        # (recurrent connections within a population).
        syn = Synapses(source = pops[l], target = pops[m],
                       model = """w : 1""",
                       on_pre = """v += w""",
                       method = 'euler',
                       )
        # NOTE(review): for l != m, i and j index DIFFERENT groups, so
        # 'i!=j' also drops same-index pairs across populations — verify
        # the condition was only meant to forbid self-connections (l == m).
        syn.connect(condition = 'i!=j', p=0.1)
        syn.w = np.random.normal(0.5,0.02, size=len(syn))
        syns.append(syn)
net = Network(collect()) # collects nothing
net.add(pops)
net.add(syns)
# this function builds a fresh set of monitor objects (one per synapse group)
def monitor_maker(syns):
    """Return a fresh list of StateMonitors, one per synapse group,
    each recording the synaptic weight variable 'w' for all synapses."""
    return [StateMonitor(group, 'w', record=True) for group in syns]
# and this one just saves the recorded vars on the hard disk
def intra_sim_process(monitors, chunk_id):
    """Dump each monitor's recorded weights to disk for the current chunk.

    Writes one .npy file per monitor, named 'w_<monitor index>_<chunk_id>.npy'.
    The original version used a single filename per chunk, so every monitor
    after the first silently overwrote the previous one and only the last
    synapse group's weights survived; the per-monitor index fixes that.

    Parameters
    ----------
    monitors : list of StateMonitor
        Monitors whose recorded `.w` arrays are saved.
    chunk_id : int
        Index of the current simulation chunk, embedded in the filename.
    """
    for idx, monitor in enumerate(monitors):
        np.save('w_{}_{}.npy'.format(idx, chunk_id), monitor.w)
# let's run a simulation of total duration 1 second. We break it down
# into 10 sub-simulations of duration 100 ms to avoid memory cloggage
for chunk in range(10):
    print("*"*10 + " CHUNK ID : {} ".format(chunk)+ "*"*10 + "\n")
    # add fresh monitors each chunk, so recorded data never accumulates
    # across chunks in RAM
    monitors = monitor_maker(syns)
    net.add(monitors)
    net.run(100*ms, report='text', report_period=20*ms)
    # flush this chunk's recordings to disk, then detach the monitors so
    # they (and their buffers) can be garbage-collected
    intra_sim_process(monitors, chunk)
    net.remove(monitors)
If you spot anything wrong or see potential improvements, please don't hesitate to leave a reply below. I'd be thankful.
Arash