>>> brian2.test('cython', test_codegen_independent=False, additional_args=['-x'])
Running tests in /Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2 for targets cython (excluding long tests)
Running Brian version 2.4.2.post0.dev317 from '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2'
Resetting to default preferences
Running tests for target cython:
F
=================================================================== FAILURES ===================================================================
_______________________________________________________________ test_active_flag ______________________________________________________________

self = <...>
obj = '/Users/adam/Library/Caches/cython/brian_extensions/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.o'
src = '/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp', ext = '.cpp'
cc_args = ['-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib...python3.8/site-packages/brian2/synapses', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8', '-c']
extra_postargs = ['-w', '-O3', '-ffast-math', '-fno-finite-math-only', '-march=native', '-std=c++11', ...]
pp_opts = ['-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib...v/lib/python3.8/site-packages/brian2/synapses', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8']

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        compiler_so = self.compiler_so
        if sys.platform == 'darwin':
            compiler_so = _osx_support.compiler_fixup(compiler_so,
                                                      cc_args + extra_postargs)
        try:
>           self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/unixccompiler.py:117:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
cmd = ['x86_64-apple-darwin13.4.0-clang', '-fno-strict-aliasing', '-Wsign-compare', '-Wunreachable-code', '-DNDEBUG', '-fwrapv', ...]

    def spawn(self, cmd):
>       spawn(cmd, dry_run=self.dry_run)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/ccompiler.py:910:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

cmd = ['x86_64-apple-darwin13.4.0-clang', '-fno-strict-aliasing', '-Wsign-compare', '-Wunreachable-code', '-DNDEBUG', '-fwrapv', ...]
search_path = 1, verbose = 0, dry_run = 0

    def spawn(cmd, search_path=1, verbose=0, dry_run=0):
        """Run another program, specified as a command list 'cmd', in a new process.

        'cmd' is just the argument list for the new process, ie.
        cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
        There is no way to run a program with a name different from that of its
        executable.

        If 'search_path' is true (the default), the system's executable
        search path will be used to find the program; otherwise, cmd[0]
        must be the exact path to the executable.  If 'dry_run' is true,
        the command will not actually be run.

        Raise DistutilsExecError if running the program fails in any way; just
        return on success.
        """
        # cmd is documented as a list, but just in case some code passes a tuple
        # in, protect our %-formatting code against horrible death
        cmd = list(cmd)
        if os.name == 'posix':
>           _spawn_posix(cmd, search_path, dry_run=dry_run)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/spawn.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

cmd = 'x86_64-apple-darwin13.4.0-clang', search_path = 1, verbose = 0, dry_run = 0

    def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
        log.info(' '.join(cmd))
        if dry_run:
            return
        executable = cmd[0]
        exec_fn = search_path and os.execvp or os.execv
        env = None
        if sys.platform == 'darwin':
            global _cfg_target, _cfg_target_split
            if _cfg_target is None:
                from distutils import sysconfig
                _cfg_target = sysconfig.get_config_var(
                        'MACOSX_DEPLOYMENT_TARGET') or ''
                if _cfg_target:
                    _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
            if _cfg_target:
                # ensure that the deployment target of build process is not less
                # than that used when the interpreter was built. This ensures
                # extension modules are built with correct compatibility values
                cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
                if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
                    my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
                              'now "%s" but "%s" during configure'
                              % (cur_target, _cfg_target))
                    raise DistutilsPlatformError(my_msg)
                env = dict(os.environ,
                           MACOSX_DEPLOYMENT_TARGET=cur_target)
                exec_fn = search_path and os.execvpe or os.execve
        pid = os.fork()
        if pid == 0:  # in the child
            try:
                if env is None:
                    exec_fn(executable, cmd)
                else:
                    exec_fn(executable, cmd, env)
            except OSError as e:
                if not DEBUG:
                    cmd = executable
                sys.stderr.write("unable to execute %r: %s\n"
                                 % (cmd, e.strerror))
                os._exit(1)
            if not DEBUG:
                cmd = executable
            sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
            os._exit(1)
        else:  # in the parent
            # Loop until the child either exits or is terminated by a signal
            # (ie. keep waiting if it's merely stopped)
            while True:
                try:
                    pid, status = os.waitpid(pid, 0)
                except OSError as exc:
                    if not DEBUG:
                        cmd = executable
                    raise DistutilsExecError(
                        "command %r failed: %s" % (cmd, exc.args[-1]))
                if os.WIFSIGNALED(status):
                    if not DEBUG:
                        cmd = executable
                    raise DistutilsExecError(
                        "command %r terminated by signal %d"
                        % (cmd, os.WTERMSIG(status)))
                elif os.WIFEXITED(status):
                    exit_status = os.WEXITSTATUS(status)
                    if exit_status == 0:
                        return   # hey, it succeeded!
                    else:
                        if not DEBUG:
                            cmd = executable
>                       raise DistutilsExecError(
                            "command %r failed with exit status %d"
E                       distutils.errors.DistutilsExecError: command 'x86_64-apple-darwin13.4.0-clang' failed with exit status 1

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/spawn.py:157: DistutilsExecError

During handling of the above exception, another exception occurred:

self = MagicNetwork()
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}

    @device_override('network_before_run')
    def before_run(self, run_namespace):
        '''
        before_run(namespace)

        Prepares the `Network` for a run.

        Objects in the `Network` are sorted into the correct running order, and
        their `BrianObject.before_run` methods are called.
        Parameters
        ----------
        run_namespace : dict-like, optional
            A namespace in which objects which do not define their own
            namespace will be run.
        '''
        all_objects = self.sorted_objects
        prefs.check_all_validated()

        # Check names in the network for uniqueness
        names = [obj.name for obj in all_objects]
        non_unique_names = [name for name, count in Counter(names).items()
                            if count > 1]
        if len(non_unique_names):
            formatted_names = ', '.join("'%s'" % name
                                        for name in non_unique_names)
            raise ValueError('All objects in a network need to have unique '
                             'names, the following name(s) were used more than '
                             'once: %s' % formatted_names)

        # Check that there are no SummedVariableUpdaters targeting the same
        # target variable
        _check_multiple_summed_updaters(all_objects)

        self._stopped = False
        Network._globally_stopped = False

        device = get_device()
        if device.network_schedule is not None:
            # The device defines a fixed network schedule
            if device.network_schedule != self.schedule:
                # TODO: The human-readable name of a device should be easier to get
                device_name = list(all_devices.keys())[list(all_devices.values()).index(device)]
                logger.warn(("The selected device '{device_name}' only "
                             "supports a fixed schedule, but this schedule is "
                             "not consistent with the network's schedule. The "
                             "simulation will use the device's schedule.\n"
                             "Device schedule: {device.network_schedule}\n"
                             "Network schedule: {net.schedule}\n"
                             "Set the network schedule explicitly or set the "
                             "core.network.default_schedule preference to "
                             "avoid this warning.").format(device_name=device_name,
                                                           device=device,
                                                           net=self),
                            name_suffix='schedule_conflict', once=True)

        logger.debug("Preparing network {self.name} with {numobj} "
                     "objects: {objnames}".format(self=self,
                                                  numobj=len(all_objects),
                                                  objnames=', '.join(obj.name for obj in all_objects)),
                     "before_run")

        self.check_dependencies()

        for obj in all_objects:
            if obj.active:
                try:
>                   obj.before_run(run_namespace)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/network.py:901:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = StateUpdater(clock=Clock(dt=100. * usecond, name='defaultclock'), when=groups, order=0, name='neurongroup_stateupdater')
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}

    def before_run(self, run_namespace):
>       self.create_code_objects(run_namespace)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/groups/group.py:1142:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = StateUpdater(clock=Clock(dt=100. * usecond, name='defaultclock'), when=groups, order=0, name='neurongroup_stateupdater')
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}

    def create_code_objects(self, run_namespace):
        # By default, we only have one code object for each CodeRunner.
        # Overwrite this function to use more than one.
>       code_object = self.create_default_code_object(run_namespace)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/groups/group.py:1135:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = StateUpdater(clock=Clock(dt=100. * usecond, name='defaultclock'), when=groups, order=0, name='neurongroup_stateupdater')
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}

    def create_default_code_object(self, run_namespace):
        self.update_abstract_code(run_namespace=run_namespace)
        # If the CodeRunner has variables, add them
        if hasattr(self, 'variables'):
            additional_variables = self.variables
        else:
            additional_variables = None

        if not self.generate_empty_code and len(self.abstract_code) == 0:
            self.codeobj = None
        else:
>           self.codeobj = create_runner_codeobj(group=self.group,
                                                 code=self.abstract_code,
                                                 user_code=self.user_code,
                                                 template_name=self.template,
                                                 name=self.name + '_codeobject*',
                                                 check_units=self.check_units,
                                                 additional_variables=additional_variables,
                                                 needed_variables=self.needed_variables,
                                                 run_namespace=run_namespace,
                                                 template_kwds=self.template_kwds,
                                                 override_conditional_write=self.override_conditional_write,
                                                 codeobj_class=self.codeobj_class
                                                 )

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/groups/group.py:1117:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

group = <...>, code = {None: '_v = dt/ms + v\nv = _v'}, template_name = 'stateupdate'
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}
user_code = {None: 'v = 1/ms'}, variable_indices = None, name = 'neurongroup_stateupdater_codeobject*', check_units = False
needed_variables = [], additional_variables = None, template_kwds = None
override_conditional_write = set(), codeobj_class = <...>

    def create_runner_codeobj(group, code, template_name,
                              run_namespace,
                              user_code=None,
                              variable_indices=None,
                              name=None, check_units=True,
                              needed_variables=None,
                              additional_variables=None,
                              template_kwds=None,
                              override_conditional_write=None,
                              codeobj_class=None
                              ):
        '''
        Create a `CodeObject` for the execution of code in the context of a `Group`.

        Parameters
        ----------
        group : `Group`
            The group where the code is to be run
        code : str or dict of str
            The code to be executed.
        template_name : str
            The name of the template to use for the code.
        run_namespace : dict-like
            An additional namespace that is used for variable lookup (either
            an explicitly defined namespace or one taken from the local
            context).
        user_code : str, optional
            The code that had been specified by the user before other code was
            added automatically. If not specified, will be assumed to be
            identical to ``code``.
        variable_indices : dict-like, optional
            A mapping from `Variable` objects to index names (strings). If none
            is given, uses the corresponding attribute of `group`.
        name : str, optional
            A name for this code object, will use ``group + '_codeobject*'`` if
            none is given.
        check_units : bool, optional
            Whether to check units in the statement. Defaults to ``True``.
        needed_variables: list of str, optional
            A list of variables that are neither present in the abstract code,
            nor in the ``USES_VARIABLES`` statement in the template. This is
            only rarely necessary, an example being a `StateMonitor` where the
            names of the variables are neither known to the template nor
            included in the abstract code statements.
        additional_variables : dict-like, optional
            A mapping of names to `Variable` objects, used in addition to the
            variables saved in `group`.
        template_kwds : dict, optional
            A dictionary of additional information that is passed to the template.
        override_conditional_write: list of str, optional
            A list of variable names which are used as conditions (e.g. for
            refractoriness) which should be ignored.
        codeobj_class : class, optional
            The `CodeObject` class to run code with. If not specified, defaults to
            the `group`'s ``codeobj_class`` attribute.
        '''
        if name is None:
            if group is not None:
                name = '%s_%s_codeobject*' % (group.name, template_name)
            else:
                name = '%s_codeobject*' % template_name

        if user_code is None:
            user_code = code

        if isinstance(code, str):
            code = {None: code}
            user_code = {None: user_code}

        msg = 'Creating code object (group=%s, template name=%s) for abstract code:\n' % (group.name, template_name)
        msg += indent(code_representation(code))
        logger.diagnostic(msg)
        from brian2.devices import get_device
        device = get_device()

        if override_conditional_write is None:
            override_conditional_write = set([])
        else:
            override_conditional_write = set(override_conditional_write)

        if codeobj_class is None:
            codeobj_class = device.code_object_class(group.codeobj_class)
        else:
            codeobj_class = device.code_object_class(codeobj_class)

        template = getattr(codeobj_class.templater, template_name)
        template_variables = getattr(template, 'variables', None)

        all_variables = dict(group.variables)
        if additional_variables is not None:
            all_variables.update(additional_variables)

        # Determine the identifiers that were used
        identifiers = set()
        user_identifiers = set()
        for v, u_v in zip(code.values(), user_code.values()):
            _, uk, u = analyse_identifiers(v, all_variables, recursive=True)
            identifiers |= uk | u
            _, uk, u = analyse_identifiers(u_v, all_variables, recursive=True)
            user_identifiers |= uk | u

        # Add variables that are not in the abstract code, nor specified in the
        # template but nevertheless necessary
        if needed_variables is None:
            needed_variables = []

        # Resolve all variables (variables used in the code and variables needed by
        # the template)
        variables = group.resolve_all(identifiers | set(needed_variables) | set(template_variables),
                                      # template variables are not known to the user:
                                      user_identifiers=user_identifiers,
                                      additional_variables=additional_variables,
                                      run_namespace=run_namespace)

        # We raise this error only now, because there is some non-obvious code path
        # where Jinja tries to get a Synapse's "name" attribute via syn['name'],
        # which then triggers the use of the `group_get_indices` template which does
        # not exist for standalone. Putting the check for template == None here
        # means we will first raise an error about the unknown identifier which will
        # then make Jinja try syn.name
        if template is None:
            codeobj_class_name = codeobj_class.class_name or codeobj_class.__name__
            raise AttributeError(('"%s" does not provide a code generation '
                                  'template "%s"') % (codeobj_class_name, template_name))

        conditional_write_variables = {}
        # Add all the "conditional write" variables
        for var in variables.values():
            cond_write_var = getattr(var, 'conditional_write', None)
            if cond_write_var in override_conditional_write:
                continue
            if cond_write_var is not None:
                if (cond_write_var.name in variables and
                        not variables[cond_write_var.name] is cond_write_var):
                    logger.diagnostic(('Variable "%s" is needed for the '
                                       'conditional write mechanism of variable '
                                       '"%s". Its name is already used for %r.') % (cond_write_var.name,
                                                                                    var.name,
                                                                                    variables[cond_write_var.name]))
                else:
                    conditional_write_variables[cond_write_var.name] = cond_write_var

        variables.update(conditional_write_variables)

        if check_units:
            for c in code.values():
                # This is the first time that the code is parsed, catch errors
                try:
                    check_units_statements(c, variables)
                except (SyntaxError, ValueError) as ex:
                    error_msg = _error_msg(c, name)
                    raise ValueError(error_msg) from ex

        all_variable_indices = copy.copy(group.variables.indices)
        if additional_variables is not None:
            all_variable_indices.update(additional_variables.indices)
        if variable_indices is not None:
            all_variable_indices.update(variable_indices)

        # Make "conditional write" variables use the same index as the variable
        # that depends on them
        for varname, var in variables.items():
            cond_write_var = getattr(var, 'conditional_write', None)
            if cond_write_var is not None:
                all_variable_indices[cond_write_var.name] = all_variable_indices[varname]

        # Check that all functions are available
        for varname, value in variables.items():
            if isinstance(value, Function):
                try:
                    value.implementations[codeobj_class]
                except KeyError as ex:
                    # if we are dealing with numpy, add the default implementation
                    from brian2.codegen.runtime.numpy_rt import NumpyCodeObject
                    if codeobj_class is NumpyCodeObject:
                        value.implementations.add_numpy_implementation(value.pyfunc)
                    else:
                        raise NotImplementedError(('Cannot use function '
                                                   '%s: %s') % (varname, ex)) from ex

        # Gather the additional compiler arguments declared by function
        # implementations
        all_keywords = [_gather_compiler_kwds(var, codeobj_class)
                        for var in variables.values()
                        if isinstance(var, Function)]
        compiler_kwds = _merge_compiler_kwds(all_keywords)

        # Add the indices needed by the variables
        for varname in list(variables):
            var_index = all_variable_indices[varname]
            if not var_index in ('_idx', '0'):
                variables[var_index] = all_variables[var_index]

>       return device.code_object(owner=group,
                                  name=name,
                                  abstract_code=code,
                                  variables=variables,
                                  template_name=template_name,
                                  variable_indices=all_variable_indices,
                                  template_kwds=template_kwds,
                                  codeobj_class=codeobj_class,
                                  override_conditional_write=override_conditional_write,
                                  compiler_kwds=compiler_kwds
                                  )

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/codegen/codeobject.py:441:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>, owner = <...>, name = 'neurongroup_stateupdater_codeobject'
abstract_code = {None: '_v = dt/ms + v\nv = _v'}
variables = {'N': <...>, 'dt': <...>, 'v': <...>}
template_name = 'stateupdate'
variable_indices = defaultdict(..., {'N': '0', 't': '0', 'dt': '0', 't_in_timesteps': '0', 'v': '_idx', 'ms': '_idx', '_v': '_idx'})
codeobj_class = <...>
template_kwds = {'dt': '_array_defaultclock_dt[0]', 'load_namespace': '_var_N = _namespace["_var_N"]\ncdef int64_t N = _namespace["N"]...group_v\'])\ncdef double v\ncdef double _lio_1\ncdef double _v', 'support_code_lines': [], 'v': '_array_neurongroup_v'}
override_conditional_write = set(), compiler_kwds = defaultdict(<...>, {})

    def code_object(self, owner, name, abstract_code, variables, template_name,
                    variable_indices, codeobj_class=None,
                    template_kwds=None, override_conditional_write=None,
                    compiler_kwds=None):
        if compiler_kwds is None:
            compiler_kwds = {}
        name = find_name(name)
        codeobj_class = self.code_object_class(codeobj_class)
        template = getattr(codeobj_class.templater, template_name)
        iterate_all = template.iterate_all
        generator = codeobj_class.generator_class(variables=variables,
                                                  variable_indices=variable_indices,
                                                  owner=owner,
                                                  iterate_all=iterate_all,
                                                  codeobj_class=codeobj_class,
                                                  override_conditional_write=override_conditional_write,
                                                  allows_scalar_write=template.allows_scalar_write,
                                                  name=name,
                                                  template_name=template_name)
        if template_kwds is None:
            template_kwds = dict()
        else:
            template_kwds = template_kwds.copy()

        logger.diagnostic('%s abstract code:\n%s' % (name, indent(code_representation(abstract_code))))

        scalar_code, vector_code, kwds = generator.translate(abstract_code,
                                                             dtype=prefs['core.default_float_dtype'])
        # Add the array names as keywords as well
        for varname, var in variables.items():
            if isinstance(var, ArrayVariable):
                pointer_name = generator.get_array_name(var)
                if var.scalar:
                    pointer_name += '[0]'
                template_kwds[varname] = pointer_name
                if hasattr(var, 'resize'):
                    dyn_array_name = generator.get_array_name(var,
                                                              access_data=False)
                    template_kwds['_dynamic_'+varname] = dyn_array_name

        template_kwds.update(kwds)
        logger.diagnostic('%s snippet (scalar):\n%s' % (name, indent(code_representation(scalar_code))))
        logger.diagnostic('%s snippet (vector):\n%s' % (name, indent(code_representation(vector_code))))

        code = template(scalar_code, vector_code,
                        owner=owner, variables=variables, codeobj_name=name,
                        variable_indices=variable_indices,
                        get_array_name=generator.get_array_name,
                        **template_kwds)
        logger.diagnostic('%s code:\n%s' % (name, indent(code_representation(code))))

        codeobj = codeobj_class(owner, code, variables, variable_indices,
                                template_name=template_name,
                                template_source=template.template_source,
                                name=name, compiler_kwds=compiler_kwds)
>       codeobj.compile()

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/devices/device.py:325:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>

    def compile(self):
        for block in ['before_run', 'run', 'after_run']:
>           self.compiled_code[block] = self.compile_block(block)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/codegen/codeobject.py:101:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>, block = 'run'

    def compile_block(self, block):
        code = getattr(self.code, block, '').strip()
        if not code or 'EMPTY_CODE_BLOCK' in code:
            return None
>       return cython_extension_manager.create_extension(
            code,
            define_macros=self.define_macros,
            libraries=self.libraries,
            extra_compile_args=self.extra_compile_args,
            extra_link_args=self.extra_link_args,
            include_dirs=self.include_dirs,
            library_dirs=self.library_dirs,
            compiler=self.compiler,
            owner_name=self.owner.name+'_'+self.template_name,
            sources=self.sources
            )

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/codegen/runtime/cython_rt/cython_rt.py:143:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
code = '#cython: language_level=3\n#cython: boundscheck=False\n#cython: wraparound=False\n#cython: cdivision=False\n#cython: ... v = _array_neurongroup_v[_idx]\n _v = _lio_1 + v\n v = _v\n _array_neurongroup_v[_idx] = v'
force = False, name = None, define_macros = []
include_dirs = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/pyt...es/numpy/core/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/synapses']
library_dirs = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib'], runtime_library_dirs = None
extra_compile_args = ['-w', '-O3', '-ffast-math', '-fno-finite-math-only', '-march=native', '-std=c++11', ...], extra_link_args = []
libraries = [], compiler = 'unix', sources = []
owner_name = 'neurongroup_stateupdate'

    def create_extension(self, code, force=False, name=None,
                         define_macros=None,
                         include_dirs=None,
                         library_dirs=None,
                         runtime_library_dirs=None,
                         extra_compile_args=None,
                         extra_link_args=None,
                         libraries=None,
                         compiler=None,
                         sources=None,
                         owner_name='',
                         ):
        if sources is None:
            sources = []
        self._simplify_paths()

        if Cython is None:
            raise ImportError('Cython is not available')

        code = deindent(code)

        lib_dir = get_cython_cache_dir()
        if '~' in lib_dir:
            lib_dir = os.path.expanduser(lib_dir)
        try:
            os.makedirs(lib_dir)
        except OSError:
            if not os.path.exists(lib_dir):
                raise IOError("Couldn't create Cython cache directory '%s', try setting the "
                              "cache directly with prefs.codegen.runtime.cython.cache_dir." % lib_dir)

        numpy_version = '.'.join(numpy.__version__.split('.')[:2])  # Only use major.minor version
        key = code, sys.version_info, sys.executable, Cython.__version__, numpy_version

        if force:
            # Force a new module name by adding the current time to the
            # key which is hashed to determine the module name.
            key += time.time(),

        if key in self._code_cache:
            return self._code_cache[key]

        if name is not None:
            module_name = name  # py3compat.unicode_to_str(args.name)
        else:
            module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
        if owner_name:
            logger.diagnostic('"{owner_name}" using Cython module "{module_name}"'.format(owner_name=owner_name,
                                                                                          module_name=module_name))

        module_path = os.path.join(lib_dir, module_name + self.so_ext)

        if prefs['codegen.runtime.cython.multiprocess_safe']:
            lock = FileLock(os.path.join(lib_dir, module_name + '.lock'))
            with lock:
>               module = self._load_module(module_path,
                                           define_macros=define_macros,
                                           include_dirs=include_dirs,
                                           library_dirs=library_dirs,
                                           extra_compile_args=extra_compile_args,
                                           extra_link_args=extra_link_args,
                                           libraries=libraries,
                                           code=code,
                                           lib_dir=lib_dir,
                                           module_name=module_name,
                                           runtime_library_dirs=runtime_library_dirs,
                                           compiler=compiler,
                                           key=key,
                                           sources=sources)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/codegen/runtime/cython_rt/extension_manager.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
module_path = '/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpython-38-darwin.so'
define_macros = []
include_dirs = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/pyt...es/numpy/core/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/synapses']
library_dirs = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib']
extra_compile_args = ['-w', '-O3', '-ffast-math', '-fno-finite-math-only', '-march=native', '-std=c++11', ...], extra_link_args = []
libraries = []
code = '#cython: language_level=3\n#cython: boundscheck=False\n#cython: wraparound=False\n#cython: cdivision=False\n#cython: ... v = _array_neurongroup_v[_idx]\n _v = _lio_1 + v\n v = _v\n _array_neurongroup_v[_idx] = v'
lib_dir = '/Users/adam/Library/Caches/cython/brian_extensions', module_name = '_cython_magic_ab96af5f433b846928be415abd688381'
runtime_library_dirs = None, compiler = 'unix'
key = ('#cython: language_level=3\n#cython: boundscheck=False\n#cython: wraparound=False\n#cython: cdivision=False\n#cython:...=11, releaselevel='final', serial=0), '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/python', '0.29.24', '1.20')
sources = []

    def _load_module(self, module_path, define_macros, include_dirs, library_dirs,
                     extra_compile_args, extra_link_args, libraries, code,
                     lib_dir, module_name, runtime_library_dirs, compiler,
                     key, sources):
        have_module = os.path.isfile(module_path)

        if not have_module:
            if define_macros is None:
                define_macros = []
            if include_dirs is None:
                include_dirs = []
            if library_dirs is None:
                library_dirs = []
            if extra_compile_args is None:
                extra_compile_args = []
            if extra_link_args is None:
                extra_link_args = []
            if libraries is None:
                libraries = []

            c_include_dirs = include_dirs
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())

            # TODO: We should probably have a special folder just for header
            # files that are shared between different codegen targets
            import brian2.synapses as synapses
            synapses_dir = os.path.dirname(synapses.__file__)
            c_include_dirs.append(synapses_dir)

            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            # ignore Python 3 unicode stuff for the moment
            # pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            # with io.open(pyx_file, 'w', encoding='utf-8') as f:
            #     f.write(code)
            with open(pyx_file, 'w') as f:
                f.write(code)

            for source in sources:
                if not source.lower().endswith('.pyx'):
                    raise ValueError('Additional Cython source files need to '
                                     'have an .pyx ending')
                # Copy source and header file (if present) to library directory
                shutil.copyfile(source, os.path.join(lib_dir,
                                                     os.path.basename(source)))
                name_without_ext = os.path.splitext(os.path.basename(source))[0]
                header_name = name_without_ext + '.pxd'
                if os.path.exists(os.path.join(os.path.dirname(source), header_name)):
                    shutil.copyfile(os.path.join(os.path.dirname(source), header_name),
                                    os.path.join(lib_dir, header_name))
            final_sources = [os.path.join(lib_dir, os.path.basename(source))
                             for source in sources]

            extension = Extension(
                name=module_name,
                sources=[pyx_file],
                define_macros=define_macros,
                include_dirs=c_include_dirs,
                library_dirs=library_dirs,
                runtime_library_dirs=runtime_library_dirs,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                libraries=libraries,
                language='c++')
            build_extension = self._get_build_extension(compiler=compiler)
            try:
                opts = dict(
                    quiet=True,
                    annotate=False,
                    force=True,
                    )
                # suppresses the output on stdout
                with std_silent():
                    build_extension.extensions = Cython_Build.cythonize([extension] + final_sources, **opts)

                    build_extension.build_temp = os.path.dirname(pyx_file)
                    build_extension.build_lib = lib_dir
>                   build_extension.run()

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/codegen/runtime/cython_rt/extension_manager.py:258:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>

    def run(self):
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.
        # See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
>       self.build_extensions()

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/command/build_ext.py:340:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>

    def build_extensions(self):
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        if self.parallel:
            self._build_extensions_parallel()
        else:
>           self._build_extensions_serial()

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/command/build_ext.py:449:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>

    def _build_extensions_serial(self):
        for ext in self.extensions:
            with self._filter_build_errors(ext):
>               self.build_extension(ext)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/command/build_ext.py:474:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>, ext = <...>

    def build_extension(self, ext):
        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                  "in 'ext_modules' option (extension '%s'), "
                  "'sources' must be present and must be "
                  "a list of source filenames" % ext.name)

        sources = list(sources)
        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)

        # First, scan the sources for SWIG definition files (.i), run
        # SWIG on 'em to create .c files, and modify the sources list
        # accordingly.
        sources = self.swig_sources(sources, ext)

        # Next, compile the source code to object files.

        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!

        # Two possible sources for extra compiler arguments:
        #   - 'extra_compile_args' in Extension object
        #   - CFLAGS environment variable (not particularly
        #     elegant, but people seem to expect it and I
        #     guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args.  Hence we combine them in order:
        extra_args = ext.extra_compile_args or []

        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))

>       objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/command/build_ext.py:528:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
sources = ['/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp']
output_dir = '/Users/adam/Library/Caches/cython/brian_extensions', macros = []
include_dirs = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/pyt...es/numpy/core/include', '/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/synapses']
debug = None, extra_preargs = None
extra_postargs = ['-w', '-O3', '-ffast-math', '-fno-finite-math-only', '-march=native', '-std=c++11', ...]
depends = ['/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/synapses/stdint_compat.h', '/Use.../Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/numpy/core/include/numpy/npy_math.h', ...]

    def compile(self, sources, output_dir=None, macros=None,
                include_dirs=None, debug=0, extra_preargs=None,
                extra_postargs=None, depends=None):
        """Compile one or more source files.

        'sources' must be a list of filenames, most likely C/C++ files,
        but in reality anything that can be handled by a particular
        compiler and compiler class (eg. MSVCCompiler can handle resource
        files in 'sources').  Return a list of object filenames, one per
        source filename in 'sources'.  Depending on the implementation,
        not all source files will necessarily be compiled, but all
        corresponding object filenames will be returned.

        If 'output_dir' is given, object files will be put under it, while
        retaining their original path component.  That is, "foo/bar.c"
        normally compiles to "foo/bar.o" (for a Unix implementation); if
        'output_dir' is "build", then it would compile to
        "build/foo/bar.o".

        'macros', if given, must be a list of macro definitions.  A macro
        definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
        The former defines a macro; if the value is None, the macro is
        defined without an explicit value.  The 1-tuple case undefines a
        macro.  Later definitions/redefinitions/undefinitions take
        precedence.

        'include_dirs', if given, must be a list of strings, the
        directories to add to the default include file search path for this
        compilation only.

        'debug' is a boolean; if true, the compiler will be instructed to
        output debug symbols in (or alongside) the object file(s).

        'extra_preargs' and 'extra_postargs' are implementation-dependent.
        On platforms that have the notion of a command-line (e.g. Unix,
        DOS/Windows), they are most likely lists of strings: extra
        command-line arguments to prepend/append to the compiler command
        line.  On other platforms, consult the implementation class
        documentation.  In any event, they are intended as an escape hatch
        for those occasions when the abstract compiler framework doesn't
        cut the mustard.

        'depends', if given, is a list of filenames that all targets
        depend on.  If a source file is older than any file in
        depends, then the source file will be recompiled.  This
        supports dependency tracking, but only at a coarse
        granularity.

        Raises CompileError on failure.
        """
        # A concrete compiler class can either override this method
        # entirely or implement _compile().
        macros, objects, extra_postargs, pp_opts, build = \
                self._setup_compile(output_dir, macros, include_dirs, sources,
                                    depends, extra_postargs)
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
>           self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/ccompiler.py:574:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
obj = '/Users/adam/Library/Caches/cython/brian_extensions/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.o'
src = '/Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp', ext = '.cpp'
cc_args = ['-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib...python3.8/site-packages/brian2/synapses', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8', '-c']
extra_postargs = ['-w', '-O3', '-ffast-math', '-fno-finite-math-only', '-march=native', '-std=c++11', ...]
pp_opts = ['-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib...v/lib/python3.8/site-packages/brian2/synapses', '-I/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8']

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        compiler_so = self.compiler_so
        if sys.platform == 'darwin':
            compiler_so = _osx_support.compiler_fixup(compiler_so,
                                                      cc_args + extra_postargs)
        try:
            self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
        except DistutilsExecError as msg:
>           raise CompileError(msg)
E           distutils.errors.CompileError: command 'x86_64-apple-darwin13.4.0-clang' failed with exit status 1

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/distutils/unixccompiler.py:120: CompileError

The above exception was the direct cause of the following exception:

    @pytest.mark.standalone_compatible
    @pytest.mark.multiple_runs
    def test_active_flag():
        G = NeuronGroup(1, 'dv/dt = 1/ms : 1')
        mon = StateMonitor(G, 'v', record=0)
        mon.active = False
>       run(1*ms)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/tests/test_base.py:73:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/units/fundamentalunits.py:2430: in new_f
    result = f(*args, **kwds)
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/magic.py:374: in run
    return magic_network.run(duration, report=report, report_period=report_period,
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/magic.py:232: in run
    Network.run(self, duration, report=report, report_period=report_period,
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/base.py:279: in device_override_decorated_function
    return func(*args, **kwds)
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/units/fundamentalunits.py:2430: in new_f
    result = f(*args, **kwds)
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/network.py:1012: in run
    self.before_run(namespace)
../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/base.py:279: in device_override_decorated_function
    return func(*args, **kwds)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = MagicNetwork()
run_namespace = {'@py_builtins': <...>, '@pytest_ar': <...>, 'ALLOW_THREADS': 1, 'Annotation': <...>, ...}

    @device_override('network_before_run')
    def before_run(self, run_namespace):
        '''
        before_run(namespace)

        Prepares the `Network` for a run.

        Objects in the `Network` are sorted into the correct running order, and
        their `BrianObject.before_run` methods are called.

        Parameters
        ----------
        run_namespace : dict-like, optional
            A namespace in which objects which do not define their own
            namespace will be run.
        '''
        all_objects = self.sorted_objects
        prefs.check_all_validated()

        # Check names in the network for uniqueness
        names = [obj.name for obj in all_objects]
        non_unique_names = [name for name, count in Counter(names).items()
                            if count > 1]
        if len(non_unique_names):
            formatted_names = ', '.join("'%s'" % name
                                        for name in non_unique_names)
            raise ValueError('All objects in a network need to have unique '
                             'names, the following name(s) were used more than '
                             'once: %s' % formatted_names)

        # Check that there are no SummedVariableUpdaters targeting the same
        # target variable
        _check_multiple_summed_updaters(all_objects)

        self._stopped = False
        Network._globally_stopped = False

        device = get_device()
        if device.network_schedule is not None:
            # The device defines a fixed network schedule
            if device.network_schedule != self.schedule:
                # TODO: The human-readable name of a device should be easier to get
                device_name = list(all_devices.keys())[list(all_devices.values()).index(device)]
                logger.warn(("The selected device '{device_name}' only "
                             "supports a fixed schedule, but this schedule is "
                             "not consistent with the network's schedule. The "
                             "simulation will use the device's schedule.\n"
                             "Device schedule: {device.network_schedule}\n"
                             "Network schedule: {net.schedule}\n"
                             "Set the network schedule explicitly or set the "
                             "core.network.default_schedule preference to "
                             "avoid this warning.").format(device_name=device_name,
                                                           device=device,
                                                           net=self),
                            name_suffix='schedule_conflict', once=True)

        logger.debug("Preparing network {self.name} with {numobj} "
                     "objects: {objnames}".format(self=self,
                                                  numobj=len(all_objects),
                                                  objnames=', '.join(obj.name for obj in all_objects)),
                     "before_run")

        self.check_dependencies()

        for obj in all_objects:
            if obj.active:
                try:
                    obj.before_run(run_namespace)
                except Exception as ex:
>                   raise BrianObjectException("An error occurred when preparing an object.", obj) from ex
E                   brian2.core.base.BrianObjectException: Error encountered with object named "neurongroup_stateupdater".
E                   Object was created here (most recent call only, full details in debug log):
E                     File "/Users/adam/opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/_pytest/python.py", line 183, in pytest_pyfunc_call
E                       result = testfunction(**testargs)
E
E                   An error occurred when preparing an object. (See above for original error message and traceback.)

../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/core/network.py:903: BrianObjectException
----------------------------------------------------------- Captured stderr call -----------------------------------------------------------
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
clang-10: warning: -Wl,-export_dynamic: 'linker' input unused [-Wunused-command-line-argument]
In file included from /Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp:46:
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:14:2: error: "Something's broken. UCHAR_MAX should be defined in limits.h."
#error "Something's broken. UCHAR_MAX should be defined in limits.h."
 ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:18:2: error: "Python's source code assumes C's unsigned char is an 8-bit type."
#error "Python's source code assumes C's unsigned char is an 8-bit type."
 ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:27:5: error: "Python.h requires that stdio.h define NULL."
#   error "Python.h requires that stdio.h define NULL."
    ^
In file included from /Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp:46:
In file included from /Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:30:
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:73:64: error: use of undeclared identifier 'strchr'
char* __libcpp_strchr(const char* __s, int __c) {return (char*)strchr(__s, __c);}
                                                               ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:80:75: error: use of undeclared identifier 'strpbrk'
char* __libcpp_strpbrk(const char* __s1, const char* __s2) {return (char*)strpbrk(__s1, __s2);}
                                                                          ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:87:65: error: use of undeclared identifier 'strrchr'; did you mean 'strchr'?
char* __libcpp_strrchr(const char* __s, int __c) {return (char*)strrchr(__s, __c);}
                                                                ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:75:13: note: 'strchr' declared here
const char* strchr(const char* __s, int __c) {return __libcpp_strchr(__s, __c);}
            ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:94:49: error: unknown type name 'size_t'
void* __libcpp_memchr(const void* __s, int __c, size_t __n) {return (void*)memchr(__s, __c, __n);}
                                                ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:96:46: error: unknown type name 'size_t'
const void* memchr(const void* __s, int __c, size_t __n) {return __libcpp_memchr(__s, __c, __n);}
                                             ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:98:46: error: unknown type name 'size_t'
void* memchr(void* __s, int __c, size_t __n) {return __libcpp_memchr(__s, __c, __n);}
                                 ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:101:74: error: use of undeclared identifier 'strstr'; did you mean 'strchr'?
char* __libcpp_strstr(const char* __s1, const char* __s2) {return (char*)strstr(__s1, __s2);}
                                                                         ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:77:13: note: 'strchr' declared here
char* strchr(char* __s, int __c) {return __libcpp_strchr(__s, __c);}
      ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:101:74: error: no matching function for call to 'strchr'
char* __libcpp_strstr(const char* __s1, const char* __s2) {return (char*)strstr(__s1, __s2);}
                                                                         ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:77:13: note: candidate disabled: <...>
char* strchr(char* __s, int __c) {return __libcpp_strchr(__s, __c);}
      ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:101:81: error: cannot initialize a parameter of type 'char *' with an lvalue of type 'const char *'
char* __libcpp_strstr(const char* __s1, const char* __s2) {return (char*)strstr(__s1, __s2);}
                                                                                ^~~~
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/string.h:77:32: note: passing argument to parameter '__s' here
char* strchr(char* __s, int __c) {return __libcpp_strchr(__s, __c);}
             ^
In file included from /Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp:46:
In file included from /Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:34:
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/stdlib.h:142:34: error: unknown type name 'ldiv_t'
inline _LIBCPP_INLINE_VISIBILITY ldiv_t div(long __x, long __y) _NOEXCEPT {
                                 ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/stdlib.h:143:12: error: no member named 'ldiv' in the global namespace
    return ::ldiv(__x, __y);
           ~~^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/stdlib.h:146:34: error: unknown type name 'lldiv_t'
inline _LIBCPP_INLINE_VISIBILITY lldiv_t div(long long __x,
                                 ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/stdlib.h:148:12: error: no member named 'lldiv' in the global namespace
    return ::lldiv(__x, __y);
           ~~^
In file included from /Users/adam/Library/Caches/cython/brian_extensions/_cython_magic_ab96af5f433b846928be415abd688381.cpp:46:
In file included from /Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/Python.h:63:
In file included from /Users/adam/opt/anaconda3/envs/neuroenv_braindev/include/python3.8/pyport.h:212:
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:771:93: error: no member named 'acosf' in the global namespace; did you mean 'acos'?
inline _LIBCPP_INLINE_VISIBILITY float acos(float __lcpp_x) _NOEXCEPT {return ::acosf(__lcpp_x);}
                                                                                ~~^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:771:46: note: 'acos' declared here
inline _LIBCPP_INLINE_VISIBILITY float acos(float __lcpp_x) _NOEXCEPT {return ::acosf(__lcpp_x);}
                                        ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:772:93: error: no member named 'acosl' in the global namespace; did you mean 'acos'?
inline _LIBCPP_INLINE_VISIBILITY long double acos(long double __lcpp_x) _NOEXCEPT {return ::acosl(__lcpp_x);}
                                                                                            ~~^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:771:46: note: 'acos' declared here
inline _LIBCPP_INLINE_VISIBILITY float acos(float __lcpp_x) _NOEXCEPT {return ::acosf(__lcpp_x);}
                                        ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:778:38: error: call to 'acos' is ambiguous
acos(_A1 __lcpp_x) _NOEXCEPT {return ::acos((double)__lcpp_x);}
                                     ^~~~~~
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:771:46: note: candidate function
inline _LIBCPP_INLINE_VISIBILITY float acos(float __lcpp_x) _NOEXCEPT {return ::acosf(__lcpp_x);}
                                        ^
/Users/adam/opt/anaconda3/envs/neuroenv_braindev/bin/../include/c++/v1/math.h:772:46: note: candidate function
inline _LIBCPP_INLINE_VISIBILITY long double acos(long double __lcpp_x) _NOEXCEPT {return ::acosl(__lcpp_x);}
                                             ^
fatal error: too many errors emitted, stopping now [-ferror-limit=]
20 errors generated.
============================================================ short test summary info ===========================================================
FAILED ../../opt/anaconda3/envs/neuroenv_braindev/lib/python3.8/site-packages/brian2/tests/test_base.py::test_active_flag - brian2.core.base.BrianObjectException: Error encountered with object named "neuro...
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1 failed, 414 deselected in 5.85s
ERROR: 1/1 test suite(s) did not complete successfully (see above).
False
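The captured stderr is the informative part of this log: clang cannot see the C standard headers (limits.h, string.h, stdlib.h, math.h), which points at a mismatch between the Anaconda-provided clang toolchain and the macOS SDK rather than at Brian2 itself. One way to narrow it down is to trigger the same Cython build path outside the test runner with a tamer flag set. The following is a minimal sketch, not a verified fix: the SDKROOT path and the replacement flag list are illustrative assumptions, while prefs.codegen.target and the codegen.cpp.extra_compile_args preference are documented Brian2 settings, and the model is the same one used by the failing test_active_flag.

    import os
    # Assumption: pointing the toolchain at the Command Line Tools SDK;
    # adjust (or omit) this path for your machine.
    os.environ.setdefault('SDKROOT', '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk')

    from brian2 import prefs, NeuronGroup, StateMonitor, run, ms

    prefs.codegen.target = 'cython'
    # Replace the '-O3 -ffast-math -march=native ...' extra_postargs seen in the
    # traceback with a conservative set while debugging the toolchain.
    prefs['codegen.cpp.extra_compile_args'] = ['-w', '-O2', '-std=c++11']

    # Same model as test_active_flag; run() exercises the same
    # create_runner_codeobj -> create_extension -> distutils build chain.
    G = NeuronGroup(1, 'dv/dt = 1/ms : 1')
    mon = StateMonitor(G, 'v', record=0)
    mon.active = False
    run(1*ms)

If this script compiles, the aggressive flag set was the problem; if it still fails with the same missing-header errors, the compiler/SDK installation is at fault, and reinstalling the Xcode Command Line Tools or the conda compiler packages is the usual next step.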