import numpy as np
import matplotlib.pyplot as plt
# Chapter 10
# Analyzing Neural Time Series Data
# Python code for Chapter 10 - converted from the original MATLAB by AE Studio (and ChatGPT)
# Original MATLAB code by Mike X Cohen
#
# This code accompanies the book "Analyzing Neural Time Series Data" (MIT Press).
# Using the code without following the book may lead to confusion, incorrect
# data analyses, and misinterpretations of results.
# Mike X Cohen and AE Studio assume no responsibility for inappropriate or
# incorrect use of this code.
# %% Dot products

# two vectors of random numbers
a = np.random.randn(10, 1)
b = np.random.randn(10, 1)

# initialize temporary matrix for the element-wise products
pointwise_result = np.zeros(a.shape)

# compute the dot product "by hand": multiply element-wise, then sum
for i in range(len(a)):
    pointwise_result[i] = a[i] * b[i]
dotproduct = np.sum(pointwise_result)

# The above code is useful if you are unfamiliar with
# how a dot product works. Following is a bit more elegant:
dotproduct = np.sum(a * b)

# The most elegant way to compute the dot product,
# however, is to use numpy's dot function:
dotproduct = np.dot(a.T, b)
# %% Figure 10.2

# impulse function (all zeros; 1 in the middle)
impfun = np.zeros(100)
impfun[49] = 1

# the figure in the book actually uses the following line, which creates a
# wider boxcar function rather than strictly an impulse function.
impfun[44:55] = 1

# convolution kernel: a short decaying ramp
kernel = np.array([1, 0.8, 0.6, 0.4, 0.2])

# numpy's convolution function; 'same' returns a result the same length as impfun
numpy_conv_result = np.convolve(impfun, kernel, 'same')
plt.figure()

# plot the signal (impulse or boxcar)
plt.subplot(311)
plt.plot(impfun)
plt.ylim([-.1, 1.1])

# plot the kernel
plt.subplot(312)
plt.plot(kernel, '.-')
plt.xlim([0, 100])
plt.ylim([-.1, 1.1])

# plot the result of convolution
plt.subplot(313)
plt.plot(numpy_conv_result)
plt.xlim([0, 100])
plt.ylim([-.1, 3.6])
plt.show()
# %% Figure 10.4

# data that we'll use for convolution (must be zero-padded on both sides
# by one less than the kernel length).
dat4conv = np.concatenate((np.zeros(len(kernel) - 1), impfun, np.zeros(len(kernel) - 1)))

# used for cutting the edges off the result of convolution
half_of_kernel_size = int(np.ceil((len(kernel) - 1) / 2))

# initialize convolution output (full convolution length: N + M - 1)
convolution_result = np.zeros(len(impfun) + len(kernel) - 1)

# run convolution (note that the kernel is flipped backwards, as the
# definition of convolution requires)
for ti in range(len(convolution_result) - half_of_kernel_size):
    convolution_result[ti] = np.sum(dat4conv[ti:ti + len(kernel)] * kernel[::-1])

# cut off edges so the result is the same length as the original signal
convolution_result = convolution_result[half_of_kernel_size:-half_of_kernel_size]
plt.figure()

# original signal
plt.plot(impfun)
# manual convolution, unscaled
plt.plot(convolution_result, 'g')
# manual convolution, scaled by the sum of the kernel
plt.plot(convolution_result / np.sum(kernel), 'r')
# numpy's convolution, scaled the same way (should overlap the red line)
plt.plot(numpy_conv_result / np.sum(kernel), 'ko')

plt.xlim([0, 100])
plt.ylim([-.1, 3.1])
plt.legend(['original timeseries', 'unscaled convolution', 'manual wavelet convolution', 'numpy conv function'])
plt.show()