Description of problem
Hi,
I’m new to Brian (and neuroscience in general) and I have been trying to implement the model from Izhikevich 2006 in Brian2. I have been using this Brian1 code as reference. However, I am struggling to implement the STDP rule and the numerical integration scheme. The STDP rule in Brian1 uses functions that I could not find in Brian2. My state updater also differs from the Brian1 implementation. The main difference in output is that the raster plot from the original paper has delta rhythms which mine lacks.
Minimal code to reproduce problem
start_scope()
defaultclock.dt = 0.5*ms
I_factor = float((1*ms)/dt)
M = 100
D = 20*ms
N_e = 800
a_e = 0.02/ms
d_e = 8*mV/ms
s_e = 6*mV/ms * I_factor
N_i = 200
a_i = 0.1 / ms
d_i = 2 * mV / ms
s_i = -5 * mV / ms * I_factor
b = 0.2/ms
c = -65*mV
sm = 10 * mV / ms * I_factor
eqs = '''
dv/dt = (0.04/ms/mV)*v**2+(5/ms)*v+140*mV/ms-u+I : volt
du/dt = a*(b*v-u) : volt/second
a : 1/second
d : volt/second
I : volt/second
'''
reset = '''
v = c
u += d
'''
new_state_updater = ExplicitStateUpdater('''
x_support = x + 0.5 * dt *f(x,t)
x_new = x_support + 0.5 * dt *f(x,t)
''')
StateUpdateMethod.register('mymethod', new_state_updater)
G = NeuronGroup(N_e + N_i, model = eqs, threshold='v>30*mV', reset=reset, method='mymethod')
''' Scheme from Brian1
def Izhikevich_scheme(G):
G.v += 0.5 * dt * ((0.04 / ms / mV) * G.v ** 2 + (5 / ms) * G.v + 140 * mV / ms - G.u + G.I)
G.v += 0.5 * dt * ((0.04 / ms / mV) * G.v ** 2 + (5 / ms) * G.v + 140 * mV / ms - G.u + G.I)
G.u += dt * G.a * (b * G.v - G.u)
G._state_updater = Izhikevich_scheme '''
Ge = G[:N_e]
Gi = G[N_e:]
G.v = c
G.u = b * c
Ge.a = a_e
Gi.a = a_i
Ge.d = d_e
Gi.d = d_i
thalamic_input_weight = 20*mV/ms
change_every = int(1*ms/dt)
change_time = 0
thal_ind = 0
@network_operation(dt=1 * ms)
def thalamic_input():
global change_time, thal_ind
G.I = 0 * mV / ms
G.I[thal_ind] = thalamic_input_weight
if change_time==0:
change_time = change_every
thal_ind = randint(N)
#G.I[randint(N)] = float(thalamic_input_weight*1*ms/dt)
change_time -= 1
artificial_wmax = 1e10 * mV / ms
taupre = taupost = 20*ms
wmax = artificial_wmax
Apre = (0.1 * mV / ms / artificial_wmax)
Apost = (-0.12 * mV / ms / artificial_wmax)
sm = 10 * mV / ms * I_factor
synapsesE = Synapses(Ge, G, '''
w_e : volt/second
dapre/dt = -apre/taupre : volt/second (event-driven)
dapost/dt = -apost/taupost : volt/second (event-driven)
''',
on_pre='''
I += w_e
apre += Apre
w_e = clip(w_e+apost, 0*mV/ms, wmax)
''',
on_post='''
apost += Apost
w_e = clip(w_e+apre, 0*mV/ms, wmax)
''')
# Connection matrix based on matlab code from original paper
# Matrix of postsynaptic indices, 100 random connections per neuron
C_e = np.array([np.random.choice(1000, 100, replace=False) for _ in range(800)])
M_e = np.zeros((N_e, N))
#Converting into matrix of 1s and 0s (1 for connection)
for x, y in np.ndenumerate(C_e):
M_e[x[0], int(y)] = 1
sourcesE, targetsE = M_e.nonzero()
synapsesE.connect(i=sourcesE, j=targetsE)
synapsesE.delay = 'ceil(j/5)*ms' # Delay based on matlab code from original paper
synapsesE.w_e = s_e
@network_operation(dt=1 * ms)
def update_weights_from_derivative():
synapsesE.w_e = clip(synapsesE.w_e + 0.01 * mV / ms + synapsesE.w_e, 0, sm)
C_i = np.array([np.random.choice(800, 100, replace=False) for _ in range(200)])
M_i = np.zeros((N_i, N))
for x, y in np.ndenumerate(C_i):
M_i[x[0], int(y)] = 1
sourcesI, targetsI = M_i.nonzero()
synapsesI = Synapses(Gi, Ge, model = 'w_i : volt/second', on_pre='I+=w_i', delay=1*ms)
synapsesI.connect(i=sourcesI, j=targetsI)
synapsesI.w_i = s_i
spikemon = SpikeMonitor(G)
run(1*second)
plot(spikemon.t/ms, spikemon.i, '.k')
xlim([0, 1000*1])
ylim([0, 1000])
xlabel('Time (ms)')
ylabel('Neuron index')
This is parts of the STDP from Brian1
# In order to implement Izhikevich's STDP rule, we need two weight matrices,
# one is the actual weight matrix and the second is a weight matrix derivative.
# To do this with Brian's sparse matrices, we create a second Connection
# Ce_deriv, which we initialise to have the same shape as Ce, but we use the
# forget() function on the Connection so that although the object exists,
# no spikes will be propagated by that Connection. We need the object to exist
# because we create an ExponentialSTDP object that acts on Ce_deriv not
# directly on Ce.
Ce_deriv = Connection(Ge, G, 'I', delay=True, max_delay=D)
forget(Ce_deriv)
# Now we duplicate Ce to Ce_deriv
for i in xrange(Ne):
Ce_deriv[i, :] = Ce[i, :]
Ce_deriv.delay[i, :] = Ce.delay[i, :]
# STDP acts directly on Ce_deriv rather than Ce. With this STDP rule alone,
# we would not see any learning, the network operation below propagates changes
# in Ce_deriv to Ce.
artificial_wmax = 1e10 * mV / ms
stdp = ExponentialSTDP(Ce_deriv,
taup=20 * ms, taum=20 * ms,
Ap=(0.1 * mV / ms / artificial_wmax),
Am=(-0.12 * mV / ms / artificial_wmax),
wmin= -artificial_wmax,
wmax=artificial_wmax,
interactions='nearest'
)
# Izhikevich's STDP rule has STDP acting on a matrix sd of derivatives, and
# then each second the weight matrix s and derivates sd are updated according
# to the rule:
# s <- s+0.01+sd
# sd <- sd*0.9
# Note also that we are using Brian's sparse matrices, but they are have
# exactly the same nonzero entries, and therefore we can do arithmetic
# operations on these matrices using the alldata attribute of Brian sparse
# matrices. The compress() method converts the ConstructionMatrix into a
# ConnectionMatrix, thus freezing the nonzero entries. In the next line, we
# want the actual starting values of Ce_deriv to be zero, but if we had done
# this before compressing to a ConnectionMatrix, all entries would be
# considered zero entries in the sparse matrix, and then Ce and Ce_deriv would
# have a different pattern of non-zero entries.
Ce_deriv.compress()
Ce_deriv.W.alldata[:] = 0
@network_operation(clock=EventClock(dt=1 * second))
def update_weights_from_derivative():
Ce.W.alldata[:] = clip(Ce.W.alldata + 0.01 * mV / ms + Ce_deriv.W.alldata, 0, sm)
Ce_deriv.W.alldata[:] *= 0.9