LSM on spiking neurons

Description of problem

Hi Brian2 community. I have a problem building a liquid state machine (LSM) with LIF neurons. Could you please help me with the correct equations or other changes to my code to make it work?

I have 1 input neuron that generates spikes into the LSM layer, the LSM reservoir itself (I also want to apply STDP to it), and an output layer of 5 neurons to classify the input (later I want to apply another learning rule on the LSM→output synapses).
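
For the STDP on the reservoir I had in mind the standard trace-based rule from the Brian2 docs, adapted to the conductance increment I use below. It would replace the plain S_ee synapses in my code; the taupre/taupost/Apre/Apost/wmax values here are just guesses:

taupre = taupost = 20*ms
wmax = 1.0
Apre = 0.01
Apost = -Apre * taupre / taupost * 1.05

S_ee = Synapses(exc_group, exc_group,
                '''w : 1
                   dapre/dt = -apre/taupre : 1 (event-driven)
                   dapost/dt = -apost/taupost : 1 (event-driven)''',
                on_pre='''g_e_post += w
                          apre += Apre
                          w = clip(w + apost, 0, wmax)''',
                on_post='''apost += Apost
                           w = clip(w + apre, 0, wmax)''')
S_ee.connect(condition='i != j', p=0.1)
S_ee.w = '0.6*rand()'  # random initial weights instead of a fixed increment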

Minimal code to reproduce problem

from brian2 import *
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

start_scope()

# Parameters
N_input = 1
N_exc = 400
N_inh = 100
N_output = 5
tau = 10*ms
E_rest = -70*mV
E_exc = 0*mV
E_inh = -80*mV
V_th = -50*mV
V_reset = -60*mV
t_ref = 2*ms
dt = 0.1*ms
defaultclock.dt = dt
duration = 400*ms

# Placeholder for my resampled input signal (real data not shown here)
resampled = np.random.uniform(-2.0, 0.0, int(round(duration / dt)))
stimulus = TimedArray(resampled * volt, dt=dt)

eqs_input = '''
dv/dt = (-v + stimulus(t)) / (10*ms) : volt (unless refractory)
'''

input_group = NeuronGroup(N_input, eqs_input, threshold='v > -1.3*volt', reset='v = -2*volt', refractory=t_ref, method='euler')

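# Conductance-based LIF model for the reservoir and output neurons; g_e and g_i are
# dimensionless gates, the driving forces (E_exc - v) and (E_inh - v) carry the volt units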
eqs = '''
dv/dt = (E_rest - v + g_e*(E_exc - v) + g_i*(E_inh - v)) / tau : volt (unless refractory)
dg_e/dt = -g_e / (5*ms) : 1
dg_i/dt = -g_i / (10*ms) : 1
'''

exc_group = NeuronGroup(N_exc, eqs, threshold='v>V_th', reset='v=V_reset', refractory=t_ref, method='euler')
exc_group.v = 'E_rest + (V_th - E_rest) * rand()'  # per-neuron random initial potential

inh_group = NeuronGroup(N_inh, eqs, threshold='v>V_th', reset='v=V_reset', refractory=t_ref, method='euler')
inh_group.v = 'E_rest + (V_th - E_rest) * rand()'

output_group = NeuronGroup(N_output, eqs, threshold='v>V_th', reset='v=V_reset', refractory=t_ref, method='euler')
output_group.v = 'E_rest + (V_th - E_rest) * rand()'

# Connections
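# NB: rand() inside on_pre is evaluated at every spike, so these are not fixed per-synapse weights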
S_input_exc = Synapses(input_group, exc_group, on_pre='g_e_post += 0.4*rand()')
S_input_exc.connect(p=0.07)

S_input_inh = Synapses(input_group, inh_group, on_pre='g_e_post += 0.2*rand()')
S_input_inh.connect(p=0.07)

S_ee = Synapses(exc_group, exc_group, on_pre='g_e_post += 0.6*rand()')
S_ee.connect(condition='i!=j', p=0.1)

S_ei = Synapses(exc_group, inh_group, on_pre='g_e_post += 0.4*rand()')
S_ei.connect(p=0.1)

S_ie = Synapses(inh_group, exc_group, on_pre='g_i_post += 1.2*rand()')
S_ie.connect(p=0.1)

S_ii = Synapses(inh_group, inh_group, on_pre='g_i_post += 0.4*rand()')
S_ii.connect(condition='i!=j', p=0.1)

# Reservoir to output connections
S_exc_out = Synapses(exc_group, output_group, on_pre='g_e_post += 0.6*rand()')
S_exc_out.connect(p=0.1)

S_inh_out = Synapses(inh_group, output_group, on_pre='g_i_post += 1.2*rand()')
S_inh_out.connect(p=0.1)

statemon_exc = StateMonitor(exc_group, 'v', record=True)
statemon_inh = StateMonitor(inh_group, 'v', record=True)
statemon_out = StateMonitor(output_group, 'v', record=True)
spikemon_out = SpikeMonitor(output_group)
spikemon_input = SpikeMonitor(input_group)
spikemon_exc = SpikeMonitor(exc_group)

run(duration)

# Output activity for training
output_activity = statemon_out.v.T

# Plot output neuron firing
plt.figure()
plt.plot(spikemon_out.t/ms, spikemon_out.i, '.')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron index')
plt.title('Output Neuron Spikes')
plt.show()

plt.figure()
plt.plot(spikemon_input.t/ms, spikemon_input.i, '.')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron index')
plt.title('Input Neuron Spikes')
plt.show()

plt.figure()
plt.plot(spikemon_exc.t/ms, spikemon_exc.i, '.')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron index')
plt.title('Reservoir Excitatory Neuron Spikes')
plt.show()
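
This is roughly what I plan for the readout, using the LogisticRegression import above on binned reservoir spike counts. I have not gotten this far yet, so the bin size and the class labels below are only placeholders:

bin_size = 20*ms
n_bins = int(round(duration / bin_size))
counts = np.zeros((n_bins, N_exc))
for t, idx in zip(spikemon_exc.t, spikemon_exc.i):
    counts[min(int(t / bin_size), n_bins - 1), idx] += 1  # spike count per bin per reservoir neuron
labels = np.random.randint(0, N_output, n_bins)           # placeholder class labels
clf = LogisticRegression(max_iter=1000).fit(counts, labels)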

What you have already tried

Expected output (if relevant)

Actual output (if relevant)

Full traceback of error (if relevant)