Resetting spike and rate monitors in C++ standalone mode

Description of problem

I am trying to find a fast way to find an optimal parameter setting to see when the neurons from my CA3 network have the highest spatial information.
Therefore, I run the network once before the optimization problem, and then pass the built network into the function that scipy uses to find a minimum for the metric I defined. I have tried the standard way with net.store(‘base‘) and then calling net.restore(‘base‘) inside the minimisation function, which works correctly. Now, given the simulations are relatively slow, I want to speed things up using the cpp_standalone mode (doesn’t have to be C++, cython would also work). The only thing I have to do is emptying the spike and rate monitors inside the minimisation function, the rest of the network doesn’t have to be reset.

Minimal code to reproduce problem

import numpy as np
import scipy as sp
from scipy.optimize import minimize
from brian2 import *
set_device('cpp_standalone', build_on_run = False)
from joblib import Parallel, delayed
import os
import logging
import datetime

datestr = datetime.datetime.now().isoformat(timespec='seconds')
scriptname = f'optimize-weights-{datestr}'

savestr = f'outputs/{scriptname}'
os.makedirs(savestr, exist_ok=True)


# Define log directory and file path
log_dir = savestr
log_file = f"{log_dir}/{scriptname}.log"

# Ensure the log directory exists
logging.basicConfig(filename=log_file, level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# ------ Parameters
namespace = {'random_seed': 0, 'simtime': 20. * second, 'simperiod': 15. * second, 't_thermalize': 5. * second, 'bin_size': 10. * msecond, 'NE': 4000, 'NI': 1000, 'simulation_timestep': 100. * usecond, 'tau_exc': 10. * msecond, 'tau_inh': 10. * msecond, 'El': -70. * mvolt, 'Ee': 0. * volt, 'Ei': -80. * mvolt, 'Vth': -50. * mvolt, 'Vreset': -65. * mvolt, 'tau_mem': 20. * msecond, 'tau_ref': 5. * msecond, 'gL': 10. * nsiemens, 'baseline_input_rate': 5.5 * hertz, 'NExt_ec3': 8000, 'N_tot_ca3': 5000, 'N_ca3_e': 4000, 'ca3_fraction_excitatory': 0.8, 'N_place_input': 3000, 'pf_peak_rate': 20. * hertz, 'rate_beta': 190.85, 'track_length': 4. * metre, 'track_bin_size': 32. * mmetre, 'input_connection_prob_noplace': 0.125, 'input_connection_prob_place': 0.125, 'SI_quantile': 0.8, 'p_E_to_E_ca3': 0.1, 'p_E_to_I_ca3': 0.1, 'p_I_to_E_ca3': 0.1, 'p_I_to_I_ca3': 0.1, 'J_ext_to_ca3': 2.2 * nsiemens, 'J_place_to_ca3': 8. * psiemens, 'J_E_to_E_ca3': 0.6 * nsiemens, 'J_E_to_I_ca3': 0.4 * nsiemens, 'J_I_to_E_ca3': 2. * nsiemens, 'J_I_to_I_ca3': 2. * nsiemens, 'eta_ip': 1.25 * uvolt, 'tau_est': 20. * second, 'target_rate': 5.5 * hertz, 'N': 5000, 'temporal_bin_size': 120. * msecond, 'temporal_bins': np.array([ 0.  ,  0.12,  0.24,  0.36,  0.48,  0.6 ,  0.72,  0.84,  0.96,
        1.08,  1.2 ,  1.32,  1.44,  1.56,  1.68,  1.8 ,  1.92,  2.04,
        2.16,  2.28,  2.4 ,  2.52,  2.64,  2.76,  2.88,  3.  ,  3.12,
        3.24,  3.36,  3.48,  3.6 ,  3.72,  3.84,  3.96,  4.08,  4.2 ,
        4.32,  4.44,  4.56,  4.68,  4.8 ,  4.92,  5.04,  5.16,  5.28,
        5.4 ,  5.52,  5.64,  5.76,  5.88,  6.  ,  6.12,  6.24,  6.36,
        6.48,  6.6 ,  6.72,  6.84,  6.96,  7.08,  7.2 ,  7.32,  7.44,
        7.56,  7.68,  7.8 ,  7.92,  8.04,  8.16,  8.28,  8.4 ,  8.52,
        8.64,  8.76,  8.88,  9.  ,  9.12,  9.24,  9.36,  9.48,  9.6 ,
        9.72,  9.84,  9.96, 10.08, 10.2 , 10.32, 10.44, 10.56, 10.68,
       10.8 , 10.92, 11.04, 11.16, 11.28, 11.4 , 11.52, 11.64, 11.76,
       11.88, 12.  , 12.12, 12.24, 12.36, 12.48, 12.6 , 12.72, 12.84,
       12.96, 13.08, 13.2 , 13.32, 13.44, 13.56, 13.68, 13.8 , 13.92,
       14.04, 14.16, 14.28, 14.4 , 14.52, 14.64, 14.76, 14.88, 15.  ])}

# Functional space (theta values)
namespace['N'] = namespace['NE'] + namespace['NI']

logging.info(f'parameters (without weight matrices):\n{namespace}')

#------- Functions
def gen_conn_matrix(npre, npost, k):
    conns = [] #every postsynaptic neuron gets a selection of predefined 
    for i in range(npost):
        conns.append(np.random.choice(range(npre), replace=False, size = (k,))) # here every postsynaptic neuron has the same number of inputs. what if every
        # presynaptic neuron has the same number of outputs?

    return np.vstack(conns), np.hstack([np.ones(k)*i for i in range(npost)]).astype(int)

def calculate_si_for_single_binned_rates(
    single_neuron_binned_rates,
    fraction_spent_in_spatial_bin,
    epsilon=1e-9
):
    mean_rate_all_bins = np.mean(single_neuron_binned_rates)
    if mean_rate_all_bins < epsilon * Hz:
        return 0.0 
    rates_div_by_mean = single_neuron_binned_rates / mean_rate_all_bins
    si_terms = np.zeros_like(rates_div_by_mean)
    positive_rate_terms_mask = rates_div_by_mean > epsilon
    if np.any(positive_rate_terms_mask):
        valid_terms = rates_div_by_mean[positive_rate_terms_mask]
        si_terms[positive_rate_terms_mask] = valid_terms * np.log2(valid_terms + epsilon)
    spatial_info_content = fraction_spent_in_spatial_bin * np.sum(si_terms)
    return spatial_info_content

class SpatialInfoCalculator():
    def __init__(self,
        spike_mon,
        pop_mons,
        NE,
        NI,
        namespace,
        simulation_time_interval,
        pop_mon_labels = ['Exc', 'Inh']):

        self.spike_mon = spike_mon
        self.pop_mons = pop_mons
        self.pop_mon_labels = pop_mon_labels
        self.namespace = namespace
        self.simulation_time_interval = simulation_time_interval
        self.NE = NE
        self.NI = NI
        self.colors = ['k', 'r'] #Colors for E and I populations

        num_neurons_total = self.NE + self.NI
        self.sts_binned = self.get_binned_spiketrains(self.spike_mon, num_neurons_total, self.simulation_time_interval,
                                                      self.namespace['temporal_bins']*second + self.simulation_time_interval[0])/self.namespace['temporal_bin_size']

        x_for_kernel = np.linspace(0,1, self.sts_binned.shape[1]) * self.namespace['simperiod']
        y_for_kernel = np.exp(-self.namespace['rate_beta'])* np.exp(self.namespace['rate_beta']*
                                                                    np.cos(np.pi/self.namespace['simperiod'] *(x_for_kernel - self.namespace['simperiod']/2)))/Hz
        kernel = y_for_kernel/(np.sum(y_for_kernel))
        self.smoothed_spiketrains = sp.ndimage.convolve1d(self.sts_binned, weights = kernel,
                                axis=1, mode='constant', cval = 0.)
        
        self.pcs_filter, self.si_of_neurons, self.quantile_si_cutoff = self._calc_pf_filter()

    def get_binned_spiketrains(self, spike_monitor, num_neurons, time_interval, space):
        # Create binned spike trains
        binned_spike_trains = np.zeros((num_neurons, len(space) - 1))
        min_time, max_time = time_interval

        for neuron_id, spike_times in spike_monitor.spike_trains().items():
            # Bin spike times, ensuring neuron_id is within bounds
            spike_times_array_mask = np.logical_and(np.greater(spike_times, min_time), np.less(spike_times, max_time)) 
            spike_times = spike_times/second #get rid  of the units
            selected_spike_times = spike_times[spike_times_array_mask]
            binned_spikes, _ = np.histogram(selected_spike_times, bins=space)
            binned_spike_trains[neuron_id, :] = binned_spikes
        return binned_spike_trains
    
    def _calc_pf_filter(self):
        spatial_information_content_per_neuron = np.array(Parallel(n_jobs = 20)(delayed(calculate_si_for_single_binned_rates)(x * Hz,
                self.namespace['track_bin_size']/self.namespace['track_length']) for x in self.smoothed_spiketrains))
        quantile = self.namespace['SI_quantile']
        q = np.quantile(spatial_information_content_per_neuron, quantile)
        pcs_filter = np.where(spatial_information_content_per_neuron>q)[0]

        return pcs_filter, spatial_information_content_per_neuron, q


N_ca3_e = namespace['N_ca3_e']

eqs_ca3 = '''
dv/dt = (gL*(El-v) + ge*(Ee-v) + gi*(Ei-v)) / (gL*tau_mem) : volt (unless refractory)
dge/dt = -ge/tau_exc : siemens
dgi/dt = -gi/tau_inh : siemens
dx/dt = -x/tau_est : 1
'''


ca3_neurons = NeuronGroup(namespace['N_tot_ca3'], eqs_ca3, threshold='v>Vth', reset='v=Vreset; x += 1.', refractory=namespace['tau_ref'], method='euler', namespace=namespace, name = 'ca3_neurons')
N_ca3_e = namespace['N_ca3_e']
N_ca3_i = namespace['N_tot_ca3'] - N_ca3_e
e_ca3_neurons, i_ca3_neurons = ca3_neurons[:N_ca3_e], ca3_neurons[N_ca3_e:]

ca3_neurons.v = 'El + rand() * (Vth-El)' # initial random membrane potential 

# # External input
# Add these parameters to your namespace
namespace['speed'] = namespace['track_length'] / namespace['simperiod']

# This group represents the "animal"
animal_eqs = animal_eqs = '''
pos = (speed * t) % track_length : meter (constant over dt)
place_input_on : 1 (constant)
'''
animal = NeuronGroup(1, animal_eqs, namespace=namespace, name='animal')
# This part is fine, just get the centers

centres_pos = np.sort(np.random.uniform(0, float(namespace['track_length']/meter), namespace['N_place_input'])) * meter
center_holder_eqs = 'centres : meter (constant)'
place_field_centers = NeuronGroup(namespace['N_place_input'],
                                center_holder_eqs,
                                name='place_field_centers')
place_field_centers.centres = centres_pos

# This equation will be evaluated for each neuron 'i' at each time step 't'
place_input_rates = '''
(baseline_input_rate + 
place_input_on * (i < N_place_input) * (pf_peak_rate * exp(-rate_beta) * exp(rate_beta * cos(pi/track_length * (pos - centres))))
)
'''

# Add the new variables to the namespace for the PoissonGroup
input_namespace = {
    'pos' : animal.variables['pos'],
    'place_input_on' : animal.variables['place_input_on'],
    'N_place_input' : namespace['N_place_input'],
    'baseline_input_rate' : namespace['baseline_input_rate'],
    'rate_beta' : namespace['rate_beta'],
    'track_length' : namespace['track_length'],
    'pf_peak_rate' : namespace['pf_peak_rate'],
    'centres': place_field_centers.variables['centres']
}


# Place inputs to CA3
ca3_inputs = PoissonGroup(namespace['N_place_input'], rates=place_input_rates, 
                        namespace=input_namespace, name='place_inputs_to_ca3')
place_to_ca3_syns = Synapses(ca3_inputs, ca3_neurons, 'w: siemens', on_pre='ge += w', name='place_to_ca3_syns')
place_to_ca3_syns.connect(p = namespace['input_connection_prob_place'])
place_to_ca3_syns.w = namespace['J_place_to_ca3'] / np.sqrt(namespace['input_connection_prob_place'] * namespace['N_place_input'])
# # EC3 -> CA3 inputs
ec3_inputs = PoissonGroup(namespace['NExt_ec3'], rates=namespace['baseline_input_rate'], name='ec3_inputs')
ec3_to_ca3 = Synapses(ec3_inputs, ca3_neurons, 'w: siemens', on_pre='ge += w', namespace=namespace, name = 'ec3_to_ca3_syns')
ec3_to_ca3.connect(p = namespace['input_connection_prob_noplace'])
ec3_to_ca3.w = namespace['J_ext_to_ca3']/np.sqrt(namespace['input_connection_prob_noplace'] * namespace['NExt_ec3'])

##############################
# # E-I Synapses (CA3 -> CA3):
ca3_E_to_ca3_E = Synapses(e_ca3_neurons, e_ca3_neurons, 'w :siemens', on_pre = 'ge += w')
ca3_E_to_ca3_E.connect(p = namespace['p_E_to_E_ca3'])
ca3_E_to_ca3_E.w = namespace['J_E_to_I_ca3']/np.sqrt(namespace['p_E_to_E_ca3'] * N_ca3_e) 

ca3_E_to_ca3_I = Synapses(e_ca3_neurons, i_ca3_neurons, 'w :siemens', on_pre = 'ge += w')
ca3_E_to_ca3_I.connect(p = namespace['p_E_to_I_ca3'])
ca3_E_to_ca3_I.w = namespace['J_E_to_I_ca3']/np.sqrt(namespace['p_E_to_I_ca3'] * N_ca3_e) 

# # I-E Synapses:
ca3_I_to_ca3_E = Synapses(i_ca3_neurons, e_ca3_neurons, 'w :siemens', on_pre = 'gi += w')
ca3_I_to_ca3_E.connect(p = namespace['p_I_to_E_ca3'])
ca3_I_to_ca3_E.w = namespace['J_I_to_E_ca3']/np.sqrt(namespace['p_I_to_E_ca3'] * N_ca3_i) 

# # I-I Synapses:
ca3_I_to_ca3_I = Synapses(i_ca3_neurons, i_ca3_neurons, 'w :siemens', on_pre = 'gi += w')
ca3_I_to_ca3_I.connect(p = namespace['p_I_to_I_ca3'])
ca3_I_to_ca3_I.w = namespace['J_I_to_I_ca3']/np.sqrt(namespace['p_I_to_I_ca3'] * N_ca3_i) 

pop_rate_ca3_e = PopulationRateMonitor(e_ca3_neurons)
pop_rate_ca3_i = PopulationRateMonitor(i_ca3_neurons)

spike_mon_ca3 = SpikeMonitor(ca3_neurons)
# plast_mon = StateMonitor(ca3_neurons, ['x', 'Uth'], record=True, dt=100*ms)

net = Network(collect())
# net.store('blank')
# 1. Initialize the switch to OFF
animal.place_input_on = 0.

# 2. Run thermalization with only baseline input
print(f"Running thermalization for {namespace['t_thermalize']}...")
net.run(namespace['t_thermalize'], report='text', namespace = locals())

# 3. Turn the place field switch ON
print("Turning on place-tuned input...")
animal.place_input_on = 1.

# 4. Run with place fields BEFORE silencing
print(f"Running with place fields for {namespace['simperiod']}...")
net.run(namespace['simtime'], report='text', namespace = locals())


# Defaults from namespace (in nS units, made dimensionless by dividing by nS)
_default_x0 = np.array([
    namespace['J_E_to_E_ca3']/nS,
    namespace['J_E_to_I_ca3']/nS,
    namespace['J_I_to_E_ca3']/nS,
    namespace['J_I_to_I_ca3']/nS,
    namespace['J_place_to_ca3']/nS,
])

# If CLI weights are provided, use them; otherwise fall back to defaults
x0 = _default_x0
print(f'x0 in nS units: {x0}')
logging.info(f'x0 in nS units: {x0}')

device.build()

def minimization_function(weights):
    # Reinitialize monitors
    global pop_rate_ca3_e
    global pop_rate_ca3_i
    global spike_mon_ca3
    
    net.remove([pop_rate_ca3_e, pop_rate_ca3_i, spike_mon_ca3])

    pop_rate_ca3_e = PopulationRateMonitor(e_ca3_neurons)
    pop_rate_ca3_i = PopulationRateMonitor(i_ca3_neurons)
    spike_mon_ca3 = SpikeMonitor(ca3_neurons)

    net.add(pop_rate_ca3_e, pop_rate_ca3_i, spike_mon_ca3)

    # net.restore('blank')
    np.random.seed(namespace['random_seed'])
    seed(namespace['random_seed'])

    ca3_E_to_ca3_E.w = weights[0] * nS /np.sqrt(namespace['p_E_to_E_ca3'] * N_ca3_e)
    ca3_E_to_ca3_I.w = weights[1] * nS /np.sqrt(namespace['p_E_to_I_ca3'] * N_ca3_e)
    ca3_I_to_ca3_E.w = weights[2] * nS /np.sqrt(namespace['p_I_to_E_ca3'] * N_ca3_i)
    ca3_I_to_ca3_I.w = weights[3] * nS /np.sqrt(namespace['p_I_to_I_ca3'] * N_ca3_i)
    place_to_ca3_syns.w = weights[4] * nS / np.sqrt(namespace['input_connection_prob_place'] * namespace['N_place_input'])

    # 1. Initialize the switch to OFF
    animal.place_input_on = 0.
    
    # 2. Run thermalization with only baseline input
    net.run(namespace['t_thermalize'], report=None, namespace = locals())
    # 3. Turn the place field switch ON
    animal.place_input_on = 1.
    
    # 4. Run with place fields BEFORE silencing
    net.run(namespace['simperiod'], report=None, namespace = locals())

    si_calc = SpatialInfoCalculator(spike_mon_ca3, [pop_rate_ca3_e, pop_rate_ca3_i],
        namespace['N_ca3_e'], namespace['N_tot_ca3'] - N_ca3_e, namespace, (namespace['t_thermalize'], namespace['t_thermalize'] + namespace['simperiod']))

    logging.info(f'mean rate E: {pop_rate_ca3_e.rate[:].mean()}, I: {pop_rate_ca3_i.rate[:].mean()}')
    logging.info(f'inverse cost (spatial information cutoff): {si_calc.quantile_si_cutoff}')
    logging.info(f'weights (in nS): {weights}')
    return 1/si_calc.quantile_si_cutoff


res = minimize(minimization_function, x0,
               method = "Nelder-Mead",
               bounds = [(0.001, 2.5), (0.001, 2.5), (0.001, 2.5), (0.001, 2.5), (0.0001, 0.05)],
               options = {'disp' : True, 'maxiter' : 2000, 'xatol' : 1e-4}
              )

logging.info(res)
logging.info(f'success: {res.success}')
logging.info(f'sol: {res.x}')
logging.info(f'status: {res.status}')
logging.info(f'message: {res.message}')
logging.info(f'final cost: {res.fun}')




What you have already tried

I have already tried without using the cpp_standalone mode, and this works fine, however this is slow. The cpp_standalone speedup is noticeable, and I’d like to make it work.

Full traceback of error (if relevant)

(brian) maxmax@pen-and-paper place-fields % python optimize-weights-si-content-debug.py
Running thermalization for 5. s...
Turning on place-tuned input...
Running with place fields for 15. s...
x0 in nS units: [0.6   0.4   2.    2.    0.008]
Starting simulation at t=0 s for duration 5 s
5 s (100%) simulated in 4s
Starting simulation at t=5 s for duration 20 s
12.1496 s (60%) simulated in 10s, estimated 6s remaining.
20 s (100%) simulated in 16s
ERROR      Brian 2 encountered an unexpected error. If you think this is a bug in Brian 2, please report this issue either to the discourse forum at <http://brian.discourse.group/>, or to the issue tracker at <https://github.com/brian-team/brian2/issues>. Please include this file with debug information in your report: /var/folders/g2/h34cbf9j2_g22l_2v68z7ml40000gn/T/brian_debug_rcbzrug2.log  Additionally, you can also include a copy of the script that was run, available at: /var/folders/g2/h34cbf9j2_g22l_2v68z7ml40000gn/T/brian_script_h1lf0kg7.py You can also include a copy of the redirected std stream outputs, available at '/var/folders/g2/h34cbf9j2_g22l_2v68z7ml40000gn/T/brian_stdout_nqmjg06m.log' and '/var/folders/g2/h34cbf9j2_g22l_2v68z7ml40000gn/T/brian_stderr_qk6zqpw7.log'. Thanks! [brian2]
Traceback (most recent call last):
  File "/Users/maxmax/Documents/place-fields/optimize-weights-si-content-debug.py", line 300, in <module>
    res = minimize(minimization_function, x0,
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/scipy/optimize/_minimize.py", line 773, in minimize
    res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/scipy/optimize/_optimize.py", line 851, in _minimize_neldermead
    fsim[k] = func(sim[k])
              ^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/scipy/optimize/_optimize.py", line 560, in function_wrapper
    fx = function(np.copy(x), *(wrapper_args + args))
         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/Documents/place-fields/optimize-weights-si-content-debug.py", line 291, in minimization_function
    si_calc = SpatialInfoCalculator(spike_mon_ca3, [pop_rate_ca3_e, pop_rate_ca3_i],
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/Documents/place-fields/optimize-weights-si-content-debug.py", line 92, in __init__
    self.sts_binned = self.get_binned_spiketrains(self.spike_mon, num_neurons_total, self.simulation_time_interval,
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/Documents/place-fields/optimize-weights-si-content-debug.py", line 109, in get_binned_spiketrains
    for neuron_id, spike_times in spike_monitor.spike_trains().items():
                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/monitors/spikemonitor.py", line 518, in spike_trains
    return self.event_trains()
           ^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/monitors/spikemonitor.py", line 384, in event_trains
    return self.values("t")
           ^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/monitors/spikemonitor.py", line 555, in values
    return super().values(var)
           ^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/monitors/spikemonitor.py", line 312, in values
    indices = self.i[:]
              ~~~~~~^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/core/variables.py", line 929, in __getitem__
    return self.get_item(item, level=1)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/core/variables.py", line 921, in get_item
    values = self.get_with_index_array(item)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/core/base.py", line 346, in device_override_decorated_function
    return func(*args, **kwds)
           ^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/core/variables.py", line 1245, in get_with_index_array
    return variable.get_value()[indices]
           ^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/core/variables.py", line 517, in get_value
    return self.device.get_value(self)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/maxmax/envs/brian/lib/python3.12/site-packages/brian2/devices/cpp_standalone/device.py", line 557, in get_value
    with open(fname, "rb") as f:
         ^^^^^^^^^^^^^^^^^
FileNotFoundError: [Errno 2] No such file or directory: '/Users/maxmax/Documents/place-fields/output/results/_dynamic_array_spikemonitor_1_i_2680224553'

Hi @flmuk. From a quick look I’m not yet sure about the exact workflow that you need, but in general you should use the approach outlined here: Computational methods and efficiency — Brian 2 0.0.post128 documentation In the minimization function, you’d set the weights via the run_args argument.
What I am unclear about: how important is it that during optimization, the network starts from the state after the initial simulation? The problem with the approach linked above is that the network always starts from the beginning, i.e. it doesn’t continue a previous run. But if your first “blank” run is only for comparison, then this wouldn’t matter, If the initial state of the network needs to correspond to the end of the “blank” run, then you need to put in a bit more work: you need to manually save the variables of interest (e.g. the NeuronGroup’s membrane potential), and then provide it as the initial value for your optimization run via run_args. But given the speed advantage of C++ standalone mode, it might also be fast enough to include the initial run (or a shorter version of it) within the optimization loop – it would be redundant across runs, but it would greatly simplify the code. Hope that gives you something to work with!

1 Like