This project is a web-based visualization tool that leverages D3.js to display EEG band powers through a dynamic bar chart. It aims to provide an intuitive understanding of EEG data by representing different band powers, such as Delta, Theta, Alpha, Beta, and Gamma, in a visually appealing and accessible format. The data is fetched and processed in JSON format, ensuring flexibility and ease of integration with various EEG data sources. This tool is designed for researchers, educators, and enthusiasts interested in exploring the patterns and insights that can be derived from EEG signals.
Below, the Python pipeline that computes the band powers and exports them to JSON is presented first, followed by the D3.js code that renders the resulting data as a bar chart.
# Python script to fetch EEG data, compute band powers, and save to JSON file
import numpy as np
import mne
from mne.time_frequency import psd_array_multitaper
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from pathlib import Path
# Fetch the MNE sample dataset (downloaded on first run, cached afterwards).
data_path = Path(mne.datasets.sample.data_path())
raw_file = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_file, preload=True)

# Quick visual sanity check of the raw traces.
raw.plot(n_channels=15, duration=10, scalings='auto')

# Event markers for this dataset live on the 'STI 014' stimulus channel.
events = mne.find_events(raw, stim_channel='STI 014')

# Band-pass filter to the frequency range of interest (0.1-50 Hz).
raw.filter(l_freq=0.1, h_freq=50)

# Epoch around the auditory events; reject any epoch whose EEG peak-to-peak
# amplitude exceeds 100 uV (simple artifact rejection).
event_ids = {'auditory/left': 1, 'auditory/right': 2}
epochs = mne.Epochs(raw, events, event_id=event_ids, tmin=-0.2, tmax=0.5,
                    baseline=None, reject={'eeg': 100e-6}, preload=True)

# Visualize the power spectral density of the epochs.
epochs.plot_psd(fmin=0.5, fmax=50)

# Compute a multitaper PSD per epoch and channel. Fetch the data once:
# every get_data() call copies the full (n_epochs, n_channels, n_times) array.
epoch_data = epochs.get_data()
n_epochs, n_channels, n_times = epoch_data.shape
data_reshaped = epoch_data.reshape(n_epochs * n_channels, n_times)
psds, freqs = psd_array_multitaper(data_reshaped, sfreq=epochs.info['sfreq'],
                                   adaptive=False, normalization='full', verbose=0)
psds = psds.reshape(n_epochs, n_channels, -1)
# Band power extraction: integrate the channel-averaged PSD over each
# canonical frequency band. Passing x=freqs[mask] makes np.trapz integrate
# against the actual frequency axis; without it the trapezoid rule assumes
# unit spacing between bins, which mis-scales every band power whenever the
# frequency resolution is not exactly 1 Hz.
band_edges = {'delta': (0.5, 4), 'theta': (4, 8), 'alpha': (8, 13),
              'beta': (13, 30), 'gamma': (30, 40)}
feature_list = []
for psd in psds:
    features = []
    for fmin, fmax in band_edges.values():
        mask = (freqs >= fmin) & (freqs <= fmax)
        # Mean across channels first, then integrate over frequency.
        features.append(np.trapz(psd[:, mask].mean(axis=0), x=freqs[mask]))
    feature_list.append(features)
# Visualizing topographies for specific bands: for each band, band-pass
# filter a copy of the epochs, compute per-channel signal energy, average
# across epochs, and plot the result as a scalp topography.
freq_bands = {'delta': (0.5, 4), 'theta': (4, 8), 'alpha': (8, 13), 'beta': (13, 30), 'gamma': (30, 40)}
for band, (fmin, fmax) in freq_bands.items():
    # Filter a copy so the original broadband epochs stay intact.
    epochs_band = epochs.copy().filter(fmin, fmax)
    # Sum of squares over time -> per-epoch, per-channel energy.
    power = np.sum(epochs_band.get_data() ** 2, axis=2)
    mean_power = power.mean(axis=0)
    # EvokedArray expects a 2-D (n_channels, n_times) array, so promote the
    # per-channel vector to a single-"timepoint" column.
    if len(mean_power.shape) == 1:
        mean_power = mean_power[:, np.newaxis]
    power_evoked = mne.EvokedArray(mean_power, epochs.info)
    # plot_topomap takes no 'title' kwarg in recent MNE versions, hence the
    # separate plt.title call below.
    power_evoked.plot_topomap(scalings=1, cmap='viridis', time_format='')
    plt.title(f'{band} band power')
# Extracting labels.
# BUG FIX: amplitude rejection in mne.Epochs may drop epochs anywhere in the
# sequence, so slicing the original event list to len(epochs) can pair a
# surviving epoch with the wrong label. epochs.events is already restricted
# to the epochs that survived rejection, so its trigger-code column is the
# correct, aligned label source.
labels = epochs.events[:, 2]
print("Number of valid events:", sum(1 for event in events if event[2] in [1, 2]))
print("Number of feature vectors:", len(feature_list))
print("Number of labels:", len(labels))
print("Number of epochs:", len(epochs))
# Summarize every trigger code present in the recording for diagnostics.
unique_events, counts = np.unique([event[2] for event in events], return_counts=True)
print("Event IDs and their counts:", dict(zip(unique_events, counts)))
# Explicit validation instead of assert (asserts vanish under python -O).
if len(feature_list) != len(labels):
    raise ValueError("Mismatch between feature vectors and labels!")
# Standardize features so each frequency band contributes on a comparable
# scale before fitting the SVM.
scaler = StandardScaler()
X = scaler.fit_transform(feature_list)

# Small grid search over regularization strength and kernel type.
# class_weight='balanced' compensates for any left/right count imbalance;
# cv=5 uses stratified 5-fold splits for a classifier target.
param_grid = {'C': [0.1, 1, 10], 'kernel': ['linear', 'rbf']}
clf = GridSearchCV(SVC(class_weight='balanced'), param_grid, cv=5)
clf.fit(X, labels)
print(f"Best parameters: {clf.best_params_}")
print(f"Best cross-validation accuracy: {clf.best_score_:.2f}")
# Visualizing the SVM results: only a linear kernel exposes interpretable
# per-feature weights via coef_.
best_model = clf.best_estimator_
if best_model.kernel == 'linear':
    band_names = ['delta', 'theta', 'alpha', 'beta', 'gamma']
    weights = best_model.coef_[0]
    plt.bar(band_names, weights)
    plt.title('SVM Weights')
    plt.ylabel('Weight Value')
    plt.xlabel('Frequency Band')
    plt.show()
import json

# Export the average band power per band for the D3 visualization.
# Cast each value to a plain float so the JSON encoder never depends on
# NumPy scalar types, and skip the unused json.dumps round-trip.
average_band_powers = np.mean(feature_list, axis=0)
bands = ['delta', 'theta', 'alpha', 'beta', 'gamma']
data_to_export = [{'band': band, 'value': float(value)}
                  for band, value in zip(bands, average_band_powers)]

# Save to a JSON file consumed by the front-end chart.
with open('eeg_band_powers.json', 'w') as json_file:
    json.dump(data_to_export, json_file)
// Load the exported band powers and render them as a responsive bar chart.
d3.json("eeg_band_powers.json").then((data) => {
    // Measure the chart container.
    const container = d3.select("#chart");
    const rect = container.node().getBoundingClientRect();
    let chartWidth = rect.width;
    // Derive the height from a fixed 16:9 aspect ratio.
    let chartHeight = chartWidth * (9 / 16);

    // Respect any max-width / max-height constraints declared in CSS.
    const style = window.getComputedStyle(container.node());
    const maxWidth = parseInt(style.getPropertyValue('max-width'), 10);
    const maxHeight = parseInt(style.getPropertyValue('max-height'), 10);
    if (!isNaN(maxWidth) && chartWidth > maxWidth) {
        chartWidth = maxWidth;
        // Keep the aspect ratio after clamping the width.
        chartHeight = chartWidth * (9 / 16);
    }
    if (!isNaN(maxHeight) && chartHeight > maxHeight) {
        chartHeight = maxHeight;
    }

    // Inner drawing area once margins are subtracted.
    const margin = { top: 20, right: 20, bottom: 30, left: 40 };
    const innerWidth = chartWidth - margin.left - margin.right;
    const innerHeight = chartHeight - margin.top - margin.bottom;

    // Root SVG, with a group translated by the margins.
    const svg = container
        .append("svg")
        .attr("width", chartWidth)
        .attr("height", chartHeight)
        .append("g")
        .attr("transform", `translate(${margin.left},${margin.top})`);

    // Band scale on x, linear power scale on y.
    const xScale = d3.scaleBand()
        .domain(data.map((d) => d.band))
        .range([0, innerWidth])
        .padding(0.2);
    const yScale = d3.scaleLinear()
        .domain([0, d3.max(data, (d) => d.value)])
        .range([innerHeight, 0]);

    // One bar per frequency band.
    svg.selectAll("rect")
        .data(data)
        .enter().append("rect")
        .attr("x", (d) => xScale(d.band))
        .attr("y", (d) => yScale(d.value))
        .attr("width", xScale.bandwidth())
        .attr("height", (d) => innerHeight - yScale(d.value));

    // Bottom and left axes.
    svg.append("g")
        .attr("transform", `translate(0,${innerHeight})`)
        .call(d3.axisBottom(xScale));
    svg.append("g")
        .call(d3.axisLeft(yScale));
});
Project Acknowledgements:
This project was inspired by the growing interest in EEG data visualization and analysis within the scientific community. Special thanks to the developers of D3.js, MNE-Python, and other open-source tools that have contributed to the success of this project. We also acknowledge the support of our collaborators and mentors who have provided valuable feedback and guidance throughout the development process.