import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.fftpack import fft, ifft
from scipy.stats import norm, t, rankdata, zscore
from scipy.ndimage import label
Chapter 34
Analyzing Neural Time Series Data
Python code for Chapter 34 – converted from original Matlab by AE Studio (and ChatGPT)
Original Matlab code by Mike X Cohen
This code accompanies the book, titled “Analyzing Neural Time Series Data” (MIT Press).
Using the code without following the book may lead to confusion, incorrect data analyses, and misinterpretations of results.
Mike X Cohen and AE Studio assume no responsibility for inappropriate or incorrect use of this code.
Extract TF power
(create data that are used for the rest of this chapter)
# Load sample data
EEG = loadmat('../data/sampleEEGdata.mat')['EEG'][0, 0]

# Definitions, selections...
chan2use = 'FCz'
min_freq = 3
max_freq = 30
num_frex = 20
# Define wavelet parameters
time = np.arange(-1, 1 + 1/EEG['srate'][0, 0], 1/EEG['srate'][0, 0])
frex = np.logspace(np.log10(min_freq), np.log10(max_freq), num_frex)
s = np.logspace(np.log10(3), np.log10(10), num_frex) / (2 * np.pi * frex)
# Define convolution parameters
n_wavelet = len(time)
n_data = EEG['pnts'][0, 0] * EEG['trials'][0, 0]
n_convolution = n_wavelet + n_data - 1
n_conv_pow2 = int(2 ** np.ceil(np.log2(n_convolution)))
half_of_wavelet_size = (n_wavelet - 1) // 2
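Rounding the FFT length up to the next power of two speeds up the FFT; the zero-padding is harmless because the inverse-transformed result is truncated back to n_convolution samples below. A quick illustration with made-up numbers (not taken from the dataset):

# e.g., if n_convolution were 63872, the next power of two is 65536:
# int(2 ** np.ceil(np.log2(63872)))  # -> 65536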
# Note that you don't need the wavelet itself, you need the FFT of the wavelet
wavelets = np.zeros((num_frex, n_conv_pow2), dtype=complex)
for fi in range(num_frex):
    wavelets[fi, :] = fft(np.sqrt(1 / (s[fi] * np.sqrt(np.pi))) * np.exp(2 * 1j * np.pi * frex[fi] * time) * np.exp(-time ** 2 / (2 * (s[fi] ** 2))), n_conv_pow2)
# Get FFT of data
eegfft = fft(EEG['data'][EEG['chanlocs'][0]['labels'] == chan2use, :, :].flatten('F'), n_conv_pow2)
# Initialize
eegpower = np.zeros((num_frex, EEG['pnts'][0, 0], EEG['trials'][0, 0]))  # frequencies X time X trials
eegphase = np.zeros((num_frex, EEG['pnts'][0, 0], EEG['trials'][0, 0]), dtype=complex)  # frequencies X time X trials
# Loop through frequencies and compute synchronization
for fi in range(num_frex):
    # Convolution
    eegconv = ifft(wavelets[fi, :] * eegfft)
    eegconv = eegconv[:n_convolution]
    eegconv = eegconv[half_of_wavelet_size:-half_of_wavelet_size]

    # Reshape to time X trials
    eegpower[fi, :, :] = np.abs(np.reshape(eegconv, (EEG['pnts'][0, 0], EEG['trials'][0, 0]), 'F')) ** 2
    eegphase[fi, :, :] = np.exp(1j * np.angle(np.reshape(eegconv, (EEG['pnts'][0, 0], EEG['trials'][0, 0]), 'F')))
# Remove edge artifacts
time_s = np.argmin(np.abs(EEG['times'][0] - (-500)))
time_e = np.argmin(np.abs(EEG['times'][0] - 1200))

eegpower = eegpower[:, time_s:time_e+1, :]
tftimes = EEG['times'][0][time_s:time_e+1]
nTimepoints = len(tftimes)
Figure 34.1
voxel_pval = 0.01
cluster_pval = 0.05

# Note: try to use 1000 or more permutations for real data
n_permutes = 1000

baseidx = [np.argmin(np.abs(tftimes - t)) for t in [-500, -100]]
# Compute actual t-test of difference
realbaselines = np.mean(eegpower[:, baseidx[0]:baseidx[1]+1, :], axis=1)
realmean = 10 * np.log10(np.mean(eegpower, axis=2) / np.mean(realbaselines, axis=1)[:, None])  # Normalize power
# Initialize null hypothesis matrices
permuted_maxvals = np.zeros((n_permutes, 2, num_frex))
permuted_vals = np.zeros((n_permutes, num_frex, len(tftimes)))
max_clust_info = np.zeros(n_permutes)
# Generate null hypothesis distributions by randomly time-shifting the power data
for permi in range(n_permutes):
    cutpoint = np.random.choice(range(1, nTimepoints - np.diff(baseidx)[0] - 2))
    permuted_vals[permi, :, :] = 10 * np.log10(np.mean(np.roll(eegpower, -cutpoint, axis=1), axis=2) / np.mean(realbaselines, axis=1)[:, None])
zmap = (realmean - np.mean(permuted_vals, axis=0)) / np.std(permuted_vals, axis=0)
threshmean = realmean.copy()
threshmean[np.abs(zmap) < norm.ppf(1 - voxel_pval)] = 0
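The z-map expresses how far the observed baseline-normalized power falls from the permutation distribution, in standard deviations. If explicit p-values are preferred over a z threshold, they can be recovered from the normal approximation; a minimal sketch, not in the original code:

# Optional: two-tailed p-values from the permutation z-map (normal approximation)
pvals = 2 * norm.sf(np.abs(zmap))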
# Plotting the figures
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
# Power map
ax = axs[0, 0]
cf = ax.contourf(tftimes, frex, realmean, 40, cmap='viridis', vmin=-3, vmax=3)
ax.set_title('Power map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Unthresholded Z map
ax = axs[0, 1]
cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis', vmin=-3, vmax=3)
ax.set_title('Unthresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Uncorrected power map
ax = axs[1, 0]
cf = ax.contourf(tftimes, frex, threshmean, 40, cmap='viridis', vmin=-3, vmax=3)
ax.set_title('Uncorrected power map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
for permi in range(n_permutes):
    # Cluster correction
    fakecorrsz = (permuted_vals[permi, :, :] - np.mean(permuted_vals, axis=0)) / np.std(permuted_vals, axis=0)
    fakecorrsz[np.abs(fakecorrsz) < norm.ppf(1 - voxel_pval)] = 0

    # Get number of elements in largest supra-threshold cluster
    labeled_array, num_features = label(fakecorrsz)
    max_clust_info[permi] = np.max([0] + [np.sum(labeled_array == i) for i in range(1, num_features + 1)])
# Apply cluster-level corrected threshold
zmapthresh = zmap.copy()
# Uncorrected pixel-level threshold
zmapthresh[np.abs(zmapthresh) < norm.ppf(1 - voxel_pval)] = 0

# Find islands and remove those smaller than cluster size threshold
labeled_array, num_features = label(zmapthresh)
cluster_sizes = np.array([np.sum(labeled_array == i) for i in range(1, num_features + 1)])
clust_threshold = np.percentile(max_clust_info, 100 - cluster_pval * 100)

# Identify clusters to remove
whichclusters2remove = np.where(cluster_sizes < clust_threshold)[0] + 1  # +1 because cluster labels start at 1

# Remove clusters
for clust in whichclusters2remove:
    zmapthresh[labeled_array == clust] = 0
# Plotting the cluster-corrected Z map
ax = axs[1, 1]
cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis', vmin=-3, vmax=3)
ax.set_title('Cluster-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')

plt.tight_layout()
plt.show()
Figure 34.3
voxel_pval = 0.05
mcc_voxel_pval = 0.05  # mcc = multiple comparisons correction
mcc_cluster_pval = 0.05

# Note: try to use 1000 or more permutations for real data
n_permutes = 1000
real_condition_mapping = np.concatenate((-np.ones(int(np.floor(EEG['trials'][0, 0] / 2))), np.ones(int(np.ceil(EEG['trials'][0, 0] / 2)))))
# Compute actual t-test of difference (using unequal N and std)
tnum = np.mean(eegpower[:, :, real_condition_mapping == -1], axis=2) - np.mean(eegpower[:, :, real_condition_mapping == 1], axis=2)
tdenom = np.sqrt((np.std(eegpower[:, :, real_condition_mapping == -1], axis=2, ddof=1) ** 2) / np.sum(real_condition_mapping == -1) +
                 (np.std(eegpower[:, :, real_condition_mapping == 1], axis=2, ddof=1) ** 2) / np.sum(real_condition_mapping == 1))
real_t = tnum / tdenom
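This is Welch's unequal-variance t statistic computed manually for all pixels at once. As an optional cross-check (a sketch, not in the original code), any single pixel should agree with scipy's built-in test:

# Optional cross-check for one pixel, e.g., the first frequency and time point
from scipy.stats import ttest_ind
t_check, _ = ttest_ind(eegpower[0, 0, real_condition_mapping == -1],
                       eegpower[0, 0, real_condition_mapping == 1],
                       equal_var=False)  # t_check should be close to real_t[0, 0]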
# Initialize null hypothesis matrices
permuted_tvals = np.zeros((n_permutes, num_frex, nTimepoints))
max_pixel_pvals = np.zeros((n_permutes, 2))
max_clust_info = np.zeros(n_permutes)
# Generate pixel-specific null hypothesis parameter distributions
for permi in range(n_permutes):
    fake_condition_mapping = np.sign(np.random.randn(EEG['trials'][0, 0]))

    # Compute t-map of null hypothesis
    tnum = np.mean(eegpower[:, :, fake_condition_mapping == -1], axis=2) - np.mean(eegpower[:, :, fake_condition_mapping == 1], axis=2)
    tdenom = np.sqrt((np.std(eegpower[:, :, fake_condition_mapping == -1], axis=2, ddof=1) ** 2) / np.sum(fake_condition_mapping == -1) +
                     (np.std(eegpower[:, :, fake_condition_mapping == 1], axis=2, ddof=1) ** 2) / np.sum(fake_condition_mapping == 1))
    tmap = tnum / tdenom

    # Save all permuted values
    permuted_tvals[permi, :, :] = tmap

    # Save maximum pixel values
    max_pixel_pvals[permi, :] = [np.min(tmap), np.max(tmap)]

    # For cluster correction, apply uncorrected threshold and get maximum cluster sizes
    tmap[np.abs(tmap) < t.ppf(1 - voxel_pval, EEG['trials'][0, 0] - 1)] = 0

    # Get number of elements in largest supra-threshold cluster
    labeled_array, num_features = label(tmap)
    max_clust_info[permi] = np.max([0] + [np.sum(labeled_array == i) for i in range(1, num_features + 1)])
# Now compute Z-map
zmap = (real_t - np.mean(permuted_tvals, axis=0)) / np.std(permuted_tvals, axis=0)
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
# Unthresholded Z map
ax = axs[0, 0]
cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis')
ax.set_title('Unthresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
zmapthresh_uncorr = zmap.copy()
t_thresh_uncorr = norm.ppf(1 - voxel_pval)
zmapthresh_uncorr[np.abs(zmapthresh_uncorr) < t_thresh_uncorr] = 0
# Uncorrected thresholded Z map
ax = axs[0, 1]
cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis')
ax.contour(tftimes, frex, zmapthresh_uncorr, levels=[-t_thresh_uncorr, t_thresh_uncorr], colors='k', linewidths=1)
ax.set_title('Uncorrected thresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Apply pixel-level corrected threshold
lower_threshold = np.percentile(max_pixel_pvals[:, 0], mcc_voxel_pval * 100 / 2)
upper_threshold = np.percentile(max_pixel_pvals[:, 1], 100 - mcc_voxel_pval * 100 / 2)

zmapthresh = zmap.copy()
zmapthresh[(real_t > lower_threshold) & (real_t < upper_threshold)] = 0  # thresholds are in t units, so compare against real_t
# Pixel-corrected Z map
ax = axs[1, 0]
cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
ax.set_title('Pixel-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Apply cluster-level corrected threshold
zmapthresh = zmap.copy()
zmapthresh[np.abs(zmapthresh) < norm.ppf(1 - voxel_pval)] = 0

# Find islands and remove those smaller than cluster size threshold
labeled_array, num_features = label(zmapthresh)
cluster_sizes = np.array([np.sum(labeled_array == i) for i in range(1, num_features + 1)])
clust_threshold = np.percentile(max_clust_info, 100 - mcc_cluster_pval * 100)

# Identify clusters to remove
whichclusters2remove = np.where(cluster_sizes < clust_threshold)[0] + 1  # +1 because cluster labels start at 1

# Remove clusters
for clust in whichclusters2remove:
    zmapthresh[labeled_array == clust] = 0
# Cluster-corrected Z map
ax = axs[1, 1]
cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
ax.set_title('Cluster-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')

plt.tight_layout()
plt.show()
Figure 34.4
voxel_pval = 0.01
mcc_voxel_pval = 0.05  # mcc = multiple comparisons correction
mcc_cluster_pval = 0.05

# Note: try to use 1000 or more permutations for real data
n_permutes = 1000
# Define covariates (RT and trial number)
rts = np.zeros(EEG['trials'][0][0])
for ei in range(EEG['trials'][0][0]):
    # In this task, the button press always followed the stimulus at
    # time=0. Thus, finding the RT involves finding the latency of the
    # event that occurs after the time=0 event.
    # If you follow a procedure like this in your data, you may need to
    # include special exceptions, e.g., if there was no response or if
    # a non-response marker could have occurred between stimulus and response.
    time0event = np.where(np.array(EEG['epoch'][0][ei]['eventlatency'][0]) == 0)[0][0]
    rts[ei] = EEG['epoch'][0][ei]['eventlatency'][0][time0event + 1]
# Rank-transform RTs
rtsrank = rankdata(rts)

# Rank-transform power data (must be transformed)
eegpowerreshaped = np.reshape(eegpower, (num_frex * nTimepoints, EEG['trials'][0][0]), 'F').T
eegpowerrank = rankdata(eegpowerreshaped, axis=0).T
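Because both the RTs and the power values are rank-transformed, the least-squares slope computed below is a fast, vectorized stand-in for Spearman's rank correlation across all pixels; the two quantities are monotonically related rather than identical, since the regression slope on ranks is not rescaled to [-1, 1]. As an optional spot-check (not part of the original code), a single pixel can be compared against scipy's implementation:

# Optional spot-check: Spearman correlation of RT with power at one pixel
from scipy.stats import spearmanr
rho, _ = spearmanr(rts, eegpower[0, 0, :])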
# Perform the matrix regression
realcorrs = np.linalg.lstsq((rtsrank.T @ rtsrank)[np.newaxis, np.newaxis], (rtsrank.T @ eegpowerrank.T)[np.newaxis, :], rcond=None)[0]
# Reshape the result to match the dimensions of frequency by time points
realcorrs = np.reshape(realcorrs, (num_frex, nTimepoints), 'F')
# Initialize null hypothesis matrices
permuted_corrs = np.zeros((n_permutes, num_frex, nTimepoints))
max_pixel_pvals = np.zeros((n_permutes, 2))
max_clust_info = np.zeros(n_permutes)
# Generate pixel-specific null hypothesis parameter distributions
for permi in range(n_permutes):
= rtsrank[np.random.permutation(EEG['trials'][0][0])]
fake_rt_mapping
# Compute t-map of null hypothesis
= np.linalg.lstsq((fake_rt_mapping.T @ fake_rt_mapping)[np.newaxis, np.newaxis], (fake_rt_mapping.T @ eegpowerrank.T)[np.newaxis, :], rcond=None)[0]
fakecorrs
# Reshape the result to match the dimensions of frequency by time pointsq
= np.reshape(fakecorrs, (num_frex, nTimepoints), 'F')
fakecorrs
# Save all permuted values
= fakecorrs
permuted_corrs[permi, :, :]
# Save maximum pixel values
= [np.min(fakecorrs), np.max(fakecorrs)]
max_pixel_pvals[permi, :]
# This time, the cluster correction will be done on the permuted data, thus
# making no assumptions about parameters for p-values
for permi in range(n_permutes):
    # Indices of permutations to include in this iteration (leave the current one out)
    perms2use4distribution = np.ones(n_permutes, dtype=bool)
    perms2use4distribution[permi] = False

    # For cluster correction, apply uncorrected threshold and get maximum cluster sizes
    fakecorrsz = (permuted_corrs[permi, :, :] - np.mean(permuted_corrs[perms2use4distribution, :, :], axis=0)) / np.std(permuted_corrs[perms2use4distribution, :, :], axis=0)
    fakecorrsz[np.abs(fakecorrsz) < norm.ppf(1 - voxel_pval)] = 0

    # Get number of elements in largest supra-threshold cluster
    labeled_array, num_features = label(fakecorrsz)
    max_clust_info[permi] = np.max([0] + [np.sum(labeled_array == i) for i in range(1, num_features + 1)])
# Now compute Z-map
zmap = (realcorrs - np.mean(permuted_corrs, axis=0)) / np.std(permuted_corrs, axis=0)
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
# Unthresholded Z map
ax = axs[0, 0]
cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis')
ax.set_title('Unthresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
zmapthresh_uncorr = zmap.copy()
z_thresh_uncorr = norm.ppf(1 - voxel_pval)
zmapthresh_uncorr[np.abs(zmapthresh_uncorr) < z_thresh_uncorr] = 0
# Uncorrected thresholded Z map
ax = axs[0, 1]
cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis')
ax.contour(tftimes, frex, zmapthresh_uncorr, levels=[-z_thresh_uncorr, z_thresh_uncorr], colors='k', linewidths=1)
ax.set_title('Uncorrected thresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Apply pixel-level corrected threshold
lower_threshold = np.percentile(max_pixel_pvals[:, 0], mcc_voxel_pval * 100 / 2)
upper_threshold = np.percentile(max_pixel_pvals[:, 1], 100 - mcc_voxel_pval * 100 / 2)

zmapthresh = zmap.copy()
zmapthresh[(realcorrs > lower_threshold) & (realcorrs < upper_threshold)] = 0
# Pixel-corrected Z map
ax = axs[1, 0]
cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
ax.set_title('Pixel-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Apply cluster-level corrected threshold
zmapthresh = zmap.copy()
zmapthresh[np.abs(zmapthresh) < norm.ppf(1 - voxel_pval)] = 0

# Find islands and remove those smaller than cluster size threshold
labeled_array, num_features = label(zmapthresh)
cluster_sizes = np.array([np.sum(labeled_array == i) for i in range(1, num_features + 1)])
clust_threshold = np.percentile(max_clust_info, 100 - mcc_cluster_pval * 100)

# Identify clusters to remove
whichclusters2remove = np.where(cluster_sizes < clust_threshold)[0] + 1  # +1 because cluster labels start at 1

# Remove clusters
for clust in whichclusters2remove:
    zmapthresh[labeled_array == clust] = 0
# Cluster-corrected Z map
ax = axs[1, 1]
cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
ax.set_title('Cluster-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')

plt.tight_layout()
plt.show()
Figure 34.5
chan2use = 'O1'
time2use = [np.argmin(np.abs(EEG['times'][0] - t)) for t in [0, 250]]
freq2use = np.argmin(np.abs(frex - 10))
eegfft = fft(EEG['data'][EEG['chanlocs'][0]['labels'] == chan2use, :, :].flatten('F'), n_conv_pow2)
eegconv = ifft(wavelets[freq2use, :] * eegfft)
eegconv = eegconv[:n_convolution]
eegconv = eegconv[half_of_wavelet_size:-half_of_wavelet_size]
# Reshape to time X trials
temp = np.abs(np.reshape(eegconv, (EEG['pnts'][0, 0], EEG['trials'][0, 0]), 'F')) ** 2
o1power = zscore(np.mean(temp[time2use[0]:time2use[1]+1, :], axis=0))
# Define covariates (RT and O1 power)
X = np.vstack((zscore(rtsrank), o1power))
eegpowerrank = rankdata(eegpowerreshaped, axis=0).T
# Plotting the covariate matrix and rank-transformed power data
fig, axs = plt.subplots(2, 1, figsize=(10, 8))
# Covariate matrix
ax = axs[0]
cax = ax.imshow(X, aspect='auto', cmap='viridis')

# Rank-transformed power data
ax = axs[1]
cax = ax.imshow(eegpowerrank, aspect='auto', cmap='viridis', interpolation='none')
plt.tight_layout()
plt.show()
Figure 34.6
voxel_pval = 0.01
mcc_cluster_pval = 0.05

# Note: try to use 1000 or more permutations for real data
n_permutes = 1000
# Perform the regression using np.linalg.lstsq
realbeta = np.linalg.lstsq(X @ X.T, X @ eegpowerrank.T, rcond=None)[0]
realbeta = np.reshape(realbeta, (2, num_frex, nTimepoints), order='F')
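Here X stacks the two regressors as rows (regressors X trials), so X @ X.T is the 2 x 2 normal-equations matrix and the lstsq call solves (X Xᵀ)B = X Yᵀ for every pixel simultaneously. A minimal, self-contained illustration of the same pattern on synthetic data (the demo names are hypothetical, not from the original):

# Normal-equations regression for many response columns at once
demoX = np.vstack((np.random.randn(20), np.random.randn(20)))  # 2 regressors x 20 trials
demoY = np.random.randn(20, 5)                                 # 20 trials x 5 "pixels"
demoB = np.linalg.lstsq(demoX @ demoX.T, demoX @ demoY, rcond=None)[0]  # 2 x 5 betas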
# Initialize null hypothesis matrices
permuted_bvals = np.zeros((n_permutes, 2, num_frex, nTimepoints))
max_clust_info = np.zeros((n_permutes, 2))
# Generate pixel-specific null hypothesis parameter distributions
for permi in range(n_permutes):
    # Randomly shuffle trial order
    fakeX = X[:, np.random.permutation(EEG['trials'][0][0])]

    # Compute beta-map of null hypothesis
    fakebeta = np.linalg.lstsq(fakeX @ fakeX.T, fakeX @ eegpowerrank.T, rcond=None)[0]
    fakebeta = np.reshape(fakebeta, (2, num_frex, nTimepoints), order='F')

    # Save all permuted values
    permuted_bvals[permi, :, :, :] = fakebeta
# Cluster correction will be done on the permuted data
for permi in range(n_permutes):
    for testi in range(2):
        # Apply uncorrected threshold and get maximum cluster sizes
        fakecorrsz = (permuted_bvals[permi, testi, :, :] - np.mean(permuted_bvals[:, testi, :, :], axis=0)) / np.std(permuted_bvals[:, testi, :, :], axis=0)
        fakecorrsz[np.abs(fakecorrsz) < norm.ppf(1 - voxel_pval)] = 0

        # Get number of elements in largest supra-threshold cluster
        labeled_array, num_features = label(fakecorrsz)
        max_clust_info[permi, testi] = np.max([0] + [np.sum(labeled_array == i) for i in range(1, num_features + 1)])
# Plotting the figures for Figure 34.6
fig, axs = plt.subplots(2, 3, figsize=(15, 10))
for testi in range(2):
    # Now compute Z-map
    zmap = (realbeta[testi, :, :] - np.mean(permuted_bvals[:, testi, :, :], axis=0)) / np.std(permuted_bvals[:, testi, :, :], axis=0)

    # Unthresholded Z map
    ax = axs[testi, 0]
    cf = ax.contourf(tftimes, frex, zmap, 40, cmap='viridis')
    ax.set_title('Unthresholded Z map')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Frequency (Hz)')

    # Apply uncorrected threshold
    zmapthresh = zmap.copy()
    zmapthresh[np.abs(zmapthresh) < norm.ppf(1 - voxel_pval)] = 0

    # Uncorrected thresholded Z map
    ax = axs[testi, 1]
    cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
    ax.set_title('Uncorrected thresholded Z map')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Frequency (Hz)')

    # Apply cluster-level corrected threshold
    clust_threshold = np.percentile(max_clust_info[:, testi], 100 - mcc_cluster_pval * 100)

    # Find islands and remove those smaller than cluster size threshold
    labeled_array, num_features = label(zmapthresh)
    cluster_sizes = np.array([np.sum(labeled_array == i) for i in range(1, num_features + 1)])

    # Identify clusters to remove
    whichclusters2remove = np.where(cluster_sizes < clust_threshold)[0] + 1  # +1 because cluster labels start at 1

    # Remove clusters
    for clust in whichclusters2remove:
        zmapthresh[labeled_array == clust] = 0

    # Cluster-corrected Z map
    ax = axs[testi, 2]
    cf = ax.contourf(tftimes, frex, zmapthresh, 40, cmap='viridis')
    ax.set_title('Cluster-corrected Z map')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Frequency (Hz)')

plt.tight_layout()
plt.show()
Figure 34.7
# Generate random data
a = np.random.rand(10000, 1)
b = np.random.rand(10000, 1)

# Create figure and axes
plt.figure(figsize=(12, 8))

# Histogram of 'a'
ax = plt.subplot(221)
ax.hist(a, bins=50, color='k', alpha=0.75)
ax.set_xlim([-.05, 1.05])

# Histogram of 'b'
ax = plt.subplot(222)
ax.hist(b, bins=50, color='k', alpha=0.75)
ax.set_xlim([-.05, 1.05])

# Histogram of the differences (using Fisher's Z transformation)
ax = plt.subplot(212)
ax.hist(np.arctanh(a - b), bins=50, color='k', alpha=0.75)
ax.set_xlim([-2, 2])
ax.set_title('ITPC differences')
ax.set_xlabel('Difference value')
ax.set_ylabel('Count')

plt.tight_layout()
plt.show()
Figure 34.8
The code to produce this figure is presented in chapter 19, between the cells for figures 19.6 and 19.7. To generate figure 34.8, you will first need to run the code for figures 19.2-19.6.
Figure 34.9
voxel_pval = 0.01
cluster_pval = 0.05

# Note: try to use 1000 or more permutations for real data
n_permutes = 1000
# Compute actual ITPC
realitpc = np.abs(np.mean(eegphase, axis=2))
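ITPC at each pixel is the length of the across-trials average of unit-length phase vectors, |mean(exp(i*phi))|, so it runs from 0 (uniform phases) to 1 (identical phases across trials). A one-line sanity check, not in the original code:

# ITPC is bounded in [0, 1] by construction
assert realitpc.min() >= 0 and realitpc.max() <= 1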
# Initialize null hypothesis matrices
permuted_maxvals = np.zeros((n_permutes, 2, num_frex))
permuted_vals = np.zeros((n_permutes, num_frex, EEG['pnts'][0][0]))
max_clust_info = np.zeros(n_permutes)
eegtemp = np.zeros(eegphase.shape, dtype=complex)
for permi in range(n_permutes):
    for triali in range(EEG['trials'][0][0]):
        cutpoint = np.random.choice(range(2, nTimepoints - 1))
        # Permute phase values by time-shifting each trial
        eegtemp[:, :, triali] = np.roll(eegphase[:, :, triali], cutpoint, axis=1)
    permuted_vals[permi, :, :] = np.abs(np.mean(eegtemp, axis=2))
zmap = (realitpc - np.mean(permuted_vals, axis=0)) / np.std(permuted_vals, axis=0)
threshmean = realitpc.copy()
threshmean[np.abs(zmap) < norm.ppf(1 - voxel_pval)] = 0
# Plotting the figures for Figure 34.9
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
# ITPC map
ax = axs[0, 0]
cf = ax.contourf(EEG['times'][0], frex, realitpc, 40, cmap='viridis')
ax.set_xlim([-200, 1000])
ax.set_title('ITPC map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Unthresholded Z map
ax = axs[0, 1]
cf = ax.contourf(EEG['times'][0], frex, zmap, 40, cmap='viridis')
ax.set_xlim([-200, 1000])
ax.set_title('Unthresholded Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# Uncorrected ITPC map
ax = axs[1, 0]
cf = ax.contourf(EEG['times'][0], frex, threshmean, 40, cmap='viridis')
ax.set_xlim([-200, 1000])
ax.set_title('Uncorrected ITPC map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
# This time, the cluster correction will be done on the permuted data, thus
# making no assumptions about parameters for p-values
for permi in range(n_permutes):
    # Cluster correction
    fakecorrsz = (permuted_vals[permi, :, :] - np.mean(permuted_vals, axis=0)) / np.std(permuted_vals, axis=0)
    fakecorrsz[np.abs(fakecorrsz) < norm.ppf(1 - voxel_pval)] = 0

    # Get number of elements in largest supra-threshold cluster
    labeled_array, num_features = label(fakecorrsz)
    max_clust_info[permi] = np.max([0] + [np.sum(labeled_array == i) for i in range(1, num_features + 1)])
# Cluster-corrected Z map
# Apply cluster-level corrected threshold
zmapthresh = realitpc.copy()
zmapthresh[np.abs(zmap) < norm.ppf(1 - voxel_pval)] = 0

# Find islands and remove those smaller than cluster size threshold
labeled_array, num_features = label(zmapthresh)
cluster_sizes = np.array([np.sum(labeled_array == i) for i in range(1, num_features + 1)])
clust_threshold = np.percentile(max_clust_info, 100 - cluster_pval * 100)

# Identify clusters to remove
whichclusters2remove = np.where(cluster_sizes < clust_threshold)[0] + 1  # +1 because cluster labels start at 1

# Remove clusters
for clust in whichclusters2remove:
    zmapthresh[labeled_array == clust] = 0
ax = axs[1, 1]
cf = ax.contourf(EEG['times'][0], frex, zmapthresh, 40, cmap='viridis')
ax.set_xlim([-200, 1000])
ax.set_title('Cluster-corrected Z map')
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')

plt.tight_layout()
plt.show()
Figure 33.5
The code for figures 33.5 and 33.6 is presented here and in the next cell. You will first need to run the code for figure 34.3 before running this code.
# Compute actual t-test of difference (using unequal N and std)
tnum = np.mean(eegpower[3, 399, real_condition_mapping == -1]) - np.mean(eegpower[3, 399, real_condition_mapping == 1])
tdenom = np.sqrt((np.std(eegpower[3, 399, real_condition_mapping == -1], ddof=1) ** 2) / np.sum(real_condition_mapping == -1) +
                 (np.std(eegpower[3, 399, real_condition_mapping == 1], ddof=1) ** 2) / np.sum(real_condition_mapping == 1))
real_t = tnum / tdenom
# Set up the range of permutations to test
n_permutes_range = np.round(np.linspace(100, 3000, 200)).astype(int)
zvals = np.zeros(len(n_permutes_range))
# Perform the permutation test for different numbers of permutations
for idx, n_permutes in enumerate(n_permutes_range):
    permuted_tvals = np.zeros(n_permutes)

    # Generate null hypothesis parameter distributions
    for permi in range(n_permutes):
        fake_condition_mapping = np.sign(np.random.randn(EEG['trials'][0][0]))
        tnum = np.mean(eegpower[3, 399, fake_condition_mapping == -1]) - np.mean(eegpower[3, 399, fake_condition_mapping == 1])
        tdenom = np.sqrt((np.std(eegpower[3, 399, fake_condition_mapping == -1], ddof=1) ** 2) / np.sum(fake_condition_mapping == -1) +
                         (np.std(eegpower[3, 399, fake_condition_mapping == 1], ddof=1) ** 2) / np.sum(fake_condition_mapping == 1))
        permuted_tvals[permi] = tnum / tdenom

    zvals[idx] = (real_t - np.mean(permuted_tvals)) / np.std(permuted_tvals)
# Plot the stability of Z-value as a function of the number of permutations
fig, axs = plt.subplots(2, 1, figsize=(8, 8))
# Plot Z-values
ax = axs[0]
ax.plot(n_permutes_range, zvals, marker='o', linestyle='-')
ax.set_xlabel('Number of permutations')
ax.set_ylabel('Z-value')
ax.set_title('Stability of Z-value')

# Histogram of Z-values
ax = axs[1]
ax.hist(zvals, bins=30, color='k', alpha=0.75)
ax.set_xlabel('Z-value')
ax.set_ylabel('Count')
ax.set_title('Histogram of Z-values at different runs of permutation test')

plt.tight_layout()
plt.show()
Figure 33.6
# Compute actual t-test of difference (using unequal N and std)
tnum = np.mean(eegpower[:, :, real_condition_mapping == -1], axis=2) - np.mean(eegpower[:, :, real_condition_mapping == 1], axis=2)
tdenom = np.sqrt((np.std(eegpower[:, :, real_condition_mapping == -1], axis=2, ddof=1) ** 2) / np.sum(real_condition_mapping == -1) +
                 (np.std(eegpower[:, :, real_condition_mapping == 1], axis=2, ddof=1) ** 2) / np.sum(real_condition_mapping == 1))
real_t = tnum / tdenom
# Set up the range of permutations to test
n_permutes_range = np.round(np.linspace(100, 3000, 200)).astype(int)
zvals = np.zeros((len(n_permutes_range), tnum.shape[0], tnum.shape[1]))
# Perform the permutation test for different numbers of permutations
for grandpermi, n_permutes in enumerate(n_permutes_range):
    permuted_tvals = np.zeros((n_permutes, tnum.shape[0], tnum.shape[1]))

    # Generate null hypothesis parameter distributions
    for permi in range(n_permutes):
        fake_condition_mapping = np.sign(np.random.randn(EEG['trials'][0][0]))
        tnum = np.mean(eegpower[:, :, fake_condition_mapping == -1], axis=2) - np.mean(eegpower[:, :, fake_condition_mapping == 1], axis=2)
        tdenom = np.sqrt((np.std(eegpower[:, :, fake_condition_mapping == -1], axis=2, ddof=1) ** 2) / np.sum(fake_condition_mapping == -1) +
                         (np.std(eegpower[:, :, fake_condition_mapping == 1], axis=2, ddof=1) ** 2) / np.sum(fake_condition_mapping == 1))
        permuted_tvals[permi, :, :] = tnum / tdenom

    zvals[grandpermi, :, :] = (real_t - np.mean(permuted_tvals, axis=0)) / np.std(permuted_tvals, axis=0)
# Plot the stability of Z-value as a function of the number of permutations for the selected time-frequency point
fig, axs = plt.subplots(2, 1, figsize=(8, 8))
# Plot Z-values for the selected time-frequency point
ax = axs[0]
ax.plot(n_permutes_range, zvals[:, 3, 399], linestyle='-')

# Histogram of Z-values for the selected time-frequency point
ax = axs[1]
ax.hist(zvals[:, 3, 399], bins=30, alpha=0.75)
plt.tight_layout()
plt.show()
# Plot the average and standard deviation of Z-statistics across all time-frequency points
zvals_all = zvals.reshape(len(n_permutes_range), num_frex * nTimepoints)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(np.mean(zvals_all, axis=0), np.std(zvals_all, axis=0), 'o')
ax.set_xlim([-3.5, 3.5])
ax.set_ylim([0, 0.12])
ax.set_xlabel('Average Z-statistic')
ax.set_ylabel('Standard deviation of Z-statistics')
plt.show()
plt.figure()
# Pixel whose average z-value is closest to 0
z0 = np.argmin(np.abs(np.mean(zvals_all, axis=0)))
x0, y0 = np.unravel_index(z0, tnum.shape)
yy, xx = np.histogram(zvals[:, x0, y0], bins=30)
width = np.diff(xx)  # Width of each bin
center = (xx[:-1] + xx[1:]) / 2  # Center of each bin
h = plt.bar(center, yy, width=width, align='center')

# Pixel whose average z-value is closest to +2
z2 = np.argmin(np.abs(np.mean(zvals_all, axis=0) - 2))
x2, y2 = np.unravel_index(z2, tnum.shape)
yy, xx = np.histogram(zvals[:, x2, y2], bins=30)
width = np.diff(xx)
center = (xx[:-1] + xx[1:]) / 2
h = plt.bar(center, yy, width=width, align='center')

# Pixel whose average z-value is closest to -3
z3 = np.argmin(np.abs(np.mean(zvals_all, axis=0) + 3))
x3, y3 = np.unravel_index(z3, tnum.shape)
yy, xx = np.histogram(zvals[:, x3, y3], bins=30)
width = np.diff(xx)
center = (xx[:-1] + xx[1:]) / 2
h = plt.bar(center, yy, width=width, align='center')

plt.xlim([-3.5, 3.5])
plt.xlabel('Z-value')
plt.ylabel('Count of possible z-values')
plt.show()