import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from skimage.measure import label as sk_label, regionprops
from scipy.signal import convolve2d
from statsmodels.stats.multitest import multipletests
Chapter 33
Analyzing Neural Time Series Data
Python code for Chapter 33 – converted from original Matlab by AE Studio (and ChatGPT)
Original Matlab code by Mike X Cohen
This code accompanies the book, titled “Analyzing Neural Time Series Data” (MIT Press).
Using the code without following the book may lead to confusion, incorrect data analyses, and misinterpretations of results.
Mike X Cohen and AE Studio assume no responsibility for inappropriate or incorrect use of this code.
Import necessary libraries
Figure 33.1
plt.figure(figsize=(10, 5))

# Subplot 1: standard normal probability density
plt.subplot(121)
x = np.linspace(-4, 4, 8000)
plt.plot(x, norm.pdf(x))
plt.gca().autoscale(enable=True, axis='both', tight=True)

# Subplot 2: histogram of 1000 normally distributed random numbers
plt.subplot(122)
a = np.random.randn(1000)
plt.hist(a, 50)
plt.show()
# Display probabilities
print(f"p_n = {np.sum(a > 2) / 1000}")
print(f"p_z = {1 - norm.cdf(2):.5f}")
p_n = 0.022
p_z = 0.02275
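As a quick check (not part of the original script), the empirical tail proportion p_n converges to the analytic normal tail p_z as the sample size grows, and norm.ppf inverts the relationship, recovering the z-value from a tail probability. A minimal illustrative sketch:
# Illustrative only: empirical tail probability vs. the analytic normal tail
for n in (1_000, 100_000, 10_000_000):
    sample = np.random.randn(n)
    print(f"n={n:>10,}: empirical p = {np.mean(sample > 2):.5f}, analytic p = {1 - norm.cdf(2):.5f}")

# Inverting the relationship: the z-value whose upper-tail probability is 0.02275
print(f"z for one-tailed p = 0.02275: {norm.ppf(1 - 0.02275):.3f}")  # ~2.0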
Figure 33.5/6
These figures are generated in the code for chapter 34
Figure 33.8
# Create 2D smoothing kernel
xi, yi = np.meshgrid(np.arange(-10, 11), np.arange(-10, 11))
zi = xi**2 + yi**2
zi = 1 - (zi / np.max(zi))
# Create a random smoothed map ('map' shadows the Python builtin; kept to match the original variable name)
map = convolve2d(np.random.randn(100, 100), zi, mode='same')
# Threshold map at an arbitrary value
mapt = map.copy()
mapt[np.abs(map) < np.ptp(map) / 4] = 0
# Need a binary map for sk_label
mapt1 = mapt.copy()
mapt1[mapt1 != 0] = 1
# Get labeled map via sk_label
mapl, nblobs = sk_label(mapt1, return_num=True, connectivity=2)
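The connectivity argument controls which neighbours join a cluster: in 2D, connectivity=1 counts only edge-sharing (4-connected) pixels, while connectivity=2 also counts diagonal (8-connected) pixels. A small illustrative check, not in the original code:
# Three diagonal pixels: separate blobs under 4-connectivity, one blob under 8-connectivity
demo = np.eye(3, dtype=int)
_, n_conn1 = sk_label(demo, return_num=True, connectivity=1)
_, n_conn2 = sk_label(demo, return_num=True, connectivity=2)
print(n_conn1, n_conn2)  # 3 1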
# Extract information from clusters
clustcount = np.zeros(nblobs)
clustsum = np.zeros(nblobs)
for i in range(nblobs):
    clustcount[i] = np.sum(mapl == i + 1)       # cluster size in pixels
    clustsum[i] = np.sum(map[mapl == i + 1])    # sum of map values within the cluster
# Regionprops works slightly differently but will give similar information
blobinfo = regionprops(mapl)
clustcount = np.zeros(nblobs)
clustsum = np.zeros(nblobs)
for i, blob in enumerate(blobinfo):
    clustcount[i] = blob.area
    clustsum[i] = np.sum(map[blob.coords[:, 0], blob.coords[:, 1]])
# Cluster count can be done faster using list comprehension
clustercount = [blob.area for blob in blobinfo]
# Plotting
plt.figure(figsize=(15, 5))

plt.subplot(131)
plt.imshow(map.T, cmap='viridis')
plt.title('original')

plt.subplot(132)
plt.imshow(mapt.T, cmap='viridis')
plt.title('thresholded')

plt.subplot(133)
plt.imshow(mapl.T, cmap='viridis')
plt.title(f'labeled ({nblobs} clusters in total)')

plt.tight_layout()
plt.show()
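The cluster sizes computed above are what a cluster-based correction would threshold on. As a purely illustrative sketch (not in the original code, and with an arbitrary cutoff rather than one derived from a null distribution of cluster sizes), small clusters could be pruned from the labeled map like this:
min_cluster_size = 50  # arbitrary, illustrative cutoff
mapl_pruned = mapl.copy()
for i in range(nblobs):
    if clustcount[i] < min_cluster_size:
        mapl_pruned[mapl == i + 1] = 0
print(f"{np.sum(np.unique(mapl_pruned) > 0)} of {nblobs} clusters exceed {min_cluster_size} pixels")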
Figure 33.10
nsigs = np.round(np.linspace(1, 500, 80)).astype(int)
nnons = np.round(np.linspace(1, 500, 100)).astype(int)

fdrpvals = np.zeros((20, len(nsigs), len(nnons)))
for iteri in range(20):
    for i, nsig in enumerate(nsigs):
        for j, nnonsig in enumerate(nnons):
            # nsig "significant" p-values (uniform on [0, 0.05]) and nnonsig "non-significant" ones (uniform on [0.05, 0.55])
            pvals = np.concatenate((np.random.rand(nsig) * 0.05, np.random.rand(nnonsig) * 0.5 + 0.05))
            reject, pvals_corrected, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
            # The FDR critical p-value is the largest raw p-value that is still rejected
            fdr_threshold = np.max(pvals[reject]) if np.any(reject) else np.nan
            fdrpvals[iteri, i, j] = fdr_threshold

fdrpvals = np.nan_to_num(fdrpvals)
fdrpvals = np.nanmean(fdrpvals, axis=0)
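For reference, the critical p-value extracted above can also be computed directly from the Benjamini-Hochberg step-up rule: the largest sorted p-value p(k) satisfying p(k) <= (k/m) * q. A minimal sketch, not part of the original script, that should match the multipletests-derived threshold for any given p-value vector:
def bh_critical_p(pvals, q=0.05):
    # Benjamini-Hochberg step-up: largest p_(k) with p_(k) <= (k/m) * q
    p_sorted = np.sort(pvals)
    m = len(p_sorted)
    below = p_sorted <= (np.arange(1, m + 1) / m) * q
    return p_sorted[np.where(below)[0].max()] if np.any(below) else np.nan

# Compare against the threshold derived from multipletests on one example vector
example_p = np.concatenate((np.random.rand(50) * 0.05, np.random.rand(200) * 0.5 + 0.05))
rej, _, _, _ = multipletests(example_p, alpha=0.05, method='fdr_bh')
print(bh_critical_p(example_p), np.max(example_p[rej]) if np.any(rej) else np.nan)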
plt.figure()
plt.imshow(fdrpvals, aspect='auto')
plt.clim(0, 0.05)
plt.xlabel('number of non-significant p-values')
plt.ylabel('number of significant p-values')
plt.show()
plt.figure(figsize=(10, 10))

plt.subplot(211)
plt.plot(np.nanmean(fdrpvals, axis=0))
plt.xlabel('number of non-significant p-values')
plt.ylabel('critical p-value')
plt.xlim(0, 100)
plt.ylim(0, 0.05)

plt.subplot(212)
plt.plot(np.nanmean(fdrpvals, axis=1))
plt.xlabel('number of significant p-values')
plt.ylabel('critical p-value')
plt.xlim(0, 80)
plt.ylim(0, 0.05)

plt.tight_layout()
plt.show()