I was really excited to run a grid search over my Brian model by wrapping it in a scikit-learn estimator and handing it to GridSearchCV. I want to do this so that all the parallelisation and bookkeeping are taken care of for me; I have successfully used this technique with a Nengo model.
The problem is that GridSearchCV apparently pickles my estimator in order to dispatch it to the workers, and I'm getting TypeError: cannot pickle 'weakref' object.
Is there any way I can get around this? I really want this to work.
I’m attaching the debug files generated by Brian.
My code structure would look something like this:
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.model_selection import GridSearchCV

# RecurrentCompetitiveNet and count_ps come from my own project code (not shown here).


class RCN_Estimator(BaseEstimator, RegressorMixin):
    def __init__(self, ba=15, i_e_w=10, i_freq=20, cue_percentage=100,
                 a2_cue_time=0.1, attractors=2):
        self.rcn = RecurrentCompetitiveNet()
        self.ba = ba
        self.i_e_w = i_e_w
        self.i_freq = i_freq
        self.cue_percentage = cue_percentage
        self.a2_cue_time = a2_cue_time
        self.attractors = attractors
        ...

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def fit(self, X, y=None):
        self.rcn.run_net(duration=2)
        ...
        self.results_ready = True
        return self

    def predict(self, X):
        assert self.results_ready, "You must call fit() before calling predict()"
        return np.array([0])

    def score(self, X, y=None, sample_weight=None):
        assert self.results_ready, "You must call fit() before calling score()"
        atr_ps_counts = count_ps(
            rcn=self.rcn,
            attractors=self.attractors,
            time_window=self.attractors_t_windows,
            spk_sync_thr=0.75)
        return atr_ps_counts


num_par = 2
cv = 2
param_grid = {
    'a2_cue_time': np.linspace(0.1, 1, num=np.rint(num_par).astype(int)),
}

print('Initial search')
print('Param grid:', param_grid)

gs = GridSearchCV(RCN_Estimator(), param_grid=param_grid, n_jobs=-1, verbose=3, cv=cv)
gs.fit(np.zeros(cv))

print('Best parameters:', gs.best_params_)
print('Best score:', gs.best_score_)
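One workaround I am considering, but have not verified yet, is to keep the unpicklable Brian objects off the estimator that GridSearchCV clones and dispatches, by only building the RecurrentCompetitiveNet inside fit(). A rough sketch of what I mean (the rcn_ / results_ready_ attribute names are just illustrative):

class RCN_Estimator_Lazy(BaseEstimator, RegressorMixin):
    def __init__(self, ba=15, i_e_w=10, i_freq=20, cue_percentage=100,
                 a2_cue_time=0.1, attractors=2):
        # Only plain parameters live on the estimator, so pickling it for
        # dispatch to the workers should not touch any Brian weakrefs.
        self.ba = ba
        self.i_e_w = i_e_w
        self.i_freq = i_freq
        self.cue_percentage = cue_percentage
        self.a2_cue_time = a2_cue_time
        self.attractors = attractors

    def fit(self, X, y=None):
        # The Brian network is built and run entirely inside the worker
        # process, so it never has to be pickled for dispatch.
        self.rcn_ = RecurrentCompetitiveNet()
        self.rcn_.run_net(duration=2)
        self.results_ready_ = True
        return self

As far as I can tell, GridSearchCV only sends the scores back from the workers, so the fitted network should never need to cross a process boundary, but I have not confirmed this.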
Thanks!
joblib.externals.loky.process_executor._RemoteTraceback:
"""
Traceback (most recent call last):
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/externals/loky/backend/queues.py", line 153, in _feed
obj_ = dumps(obj, reducers=reducers)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/externals/loky/backend/reduction.py", line 271, in dumps
dump(obj, buf, reducers=reducers, protocol=protocol)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/externals/loky/backend/reduction.py", line 264, in dump
_LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py", line 602, in dump
return Pickler.dump(self, obj)
TypeError: cannot pickle 'weakref' object
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/pydevconsole.py", line 364, in runcode
coro = func()
File "<input>", line 1, in <module>
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 198, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/Users/thomas/PycharmProjects/wm_colaboration/network_dynamics/RCN/intrinsic_plasticity/RCN_intrinsic_adaptation_simulation_parallel.py", line 247, in <module>
gs.fit(np.zeros(cv))
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/sklearn/model_selection/_search.py", line 875, in fit
self._run_search(evaluate_candidates)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/sklearn/model_selection/_search.py", line 1375, in _run_search
evaluate_candidates(ParameterGrid(self.param_grid))
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/sklearn/model_selection/_search.py", line 822, in evaluate_candidates
out = parallel(
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/parallel.py", line 1056, in __call__
self.retrieve()
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/parallel.py", line 935, in retrieve
self._output.extend(job.get(timeout=self.timeout))
File "/Users/thomas/.conda/envs/brian/lib/python3.9/site-packages/joblib/_parallel_backends.py", line 542, in wrap_future_result
return future.result(timeout=timeout)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/concurrent/futures/_base.py", line 446, in result
return self.__get_result()
File "/Users/thomas/.conda/envs/brian/lib/python3.9/concurrent/futures/_base.py", line 391, in __get_result
raise self._exception
_pickle.PicklingError: Could not pickle the task to send it to the workers.
brian_debug_n1rccgar.log (1.0 MB)
brian_script_umgkest4.py (10.8 KB)
UPDATE 1: A bit of digging shows that the error is raised by this line inside the joblib/loky backend (queues.py, in _feed, per the traceback above):
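# joblib/externals/loky/backend/queues.py, line 153, in _feed
obj_ = dumps(obj, reducers=reducers)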
UPDATE 2: I also tried running the example at Example: 01_using_cython — Brian 2 2.5.1 documentation, and that leads to pickling issues too:
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/pydevconsole.py", line 364, in runcode
coro = func()
File "<input>", line 1, in <module>
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 198, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/Users/thomas/PycharmProjects/wm_colaboration/network_dynamics/RCN/intrinsic_plasticity/RCN_intrinsic_adaptation_simulation_parallel_multiprocessing.py", line 22, in <module>
results = p.map(run_sim, tau_values)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/multiprocessing/pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/Users/thomas/.conda/envs/brian/lib/python3.9/multiprocessing/pool.py", line 771, in get
raise self._value
File "/Users/thomas/.conda/envs/brian/lib/python3.9/multiprocessing/pool.py", line 537, in _handle_tasks
put(task)
File "/Users/thomas/.conda/envs/brian/lib/python3.9/multiprocessing/connection.py", line 211, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/Users/thomas/.conda/envs/brian/lib/python3.9/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function run_sim at 0x104934280>: attribute lookup run_sim on __main__ failed
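I think this second failure is the standard multiprocessing restriction rather than a Brian problem: because I launched the script through the PyCharm console, the workers cannot look up run_sim as __main__.run_sim, so the plain pickler refuses it. If that is right, the conventional fix would be to run the file as a normal script with the pool creation guarded; a sketch (I have not tried it in exactly this form, and the tau values are illustrative):

from multiprocessing import Pool

def run_sim(tau):
    # build and run one Brian simulation for this tau (as in the Brian example)
    ...

if __name__ == "__main__":
    tau_values = [5, 10, 20]  # illustrative parameter list
    # With the guard, run_sim stays importable from this module, so the
    # worker processes can reconstruct the function from its reference.
    with Pool(4) as p:
        results = p.map(run_sim, tau_values)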
UPDATE 3: Switching from multiprocessing to pathos seems to have solved the issue when running Example: 01_using_cython — Brian 2 2.5.1 documentation.
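For reference, the change is roughly the one below (the pool size is illustrative); pathos serialises with dill rather than pickle, which I assume is why it copes with a function defined in the console session:

from pathos.multiprocessing import ProcessingPool as Pool

p = Pool(4)
results = p.map(run_sim, tau_values)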