NN_Jupyter

import ast
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping

data = pd.read_csv('output7.csv')
data.head()

# Filter to only rows with labels that contain "Touch" or are "No_Gesture"
filtered_data = data[data['Label'].str.contains('Touch') | (data['Label'] == 'No_Gesture')]
#filtered_data = data[data['Label'].str.contains('Touch')]
# Check the unique labels in the filtered dataset
print(filtered_data['Label'].unique())
data = filtered_data.copy()  # copy so the column assignments below don't modify a view
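# Optional sanity check (a small addition, assuming the 'Label' column as loaded above):
# inspect how many rows each class contributes, since gesture datasets are often imbalanced.
print(data['Label'].value_counts())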

#data.describe()

scaler = StandardScaler()
sensor_columns = [col for col in data.columns if 'Channel' in col]
print(data.columns)

# Each channel cell holds a list/tuple (stored as a string in the CSV); keep only its third value (index 2)
for col in sensor_columns:
    data[col] = data[col].apply(lambda x: ast.literal_eval(x)[2] if isinstance(x, str) else x[2])

data[sensor_columns] = scaler.fit_transform(data[sensor_columns])
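# Quick diagnostic (a sketch, not required for training): after standardization each
# channel column should have roughly zero mean and unit standard deviation.
print(data[sensor_columns].describe().loc[['mean', 'std']])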

# Encode the gesture labels
label_encoder = LabelEncoder()
data['label'] = label_encoder.fit_transform(data['Label'])

# Split the data into features and target
X = data[sensor_columns]
y = data['label']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
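# Note: the scaler above was fit on the full dataset before splitting, so test-set
# statistics leak into training. A leakage-free variant (a sketch, not wired into the
# pipeline above; X_raw stands for the unscaled feature frame) would split first and
# fit the scaler on the training rows only:
#
#   X_train, X_test, y_train, y_test = train_test_split(X_raw, y, test_size=0.2, random_state=42)
#   scaler = StandardScaler()
#   X_train = scaler.fit_transform(X_train)
#   X_test = scaler.transform(X_test)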

# Neural network model (an earlier, smaller architecture is kept commented out below for reference)
#model = Sequential()
#model.add(Dense(64, input_dim=len(sensor_columns), activation='relu'))
#model.add(Dropout(0.5))
#model.add(Dense(32, activation='relu'))
#model.add(Dense(len(np.unique(y)), activation='softmax'))
model = Sequential()
model.add(Dense(128, input_dim=len(sensor_columns), activation='relu', kernel_regularizer='l2'))  # L2 regularization
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu', kernel_regularizer='l2'))
model.add(Dense(len(np.unique(y)), activation='softmax'))
# Compile the model
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
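# Print a layer-by-layer overview of the compiled network, including parameter counts.
model.summary()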

early_stopping = EarlyStopping(monitor='val_loss', patience=5)

# Train (pass the callback so early stopping actually monitors the validation loss)
history = model.fit(X_train, y_train, epochs=50, batch_size=32,
                    validation_split=0.2, callbacks=[early_stopping])

# Evaluate the model on the test set
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Test Accuracy: {accuracy*100:.2f}%')

# Plot training history
plt.plot(history.history['accuracy'], label='Training accuracy')
plt.plot(history.history['val_accuracy'], label='Validation accuracy')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()
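# Companion plot: training vs. validation loss from the same History object, useful for
# spotting overfitting (validation loss rising while training loss keeps falling).
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.title('Training and Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()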

# Make predictions on the test set
y_pred = model.predict(X_test)
y_pred_classes = np.argmax(y_pred, axis=1)

# Display the classification report
print(classification_report(y_test, y_pred_classes, target_names=label_encoder.classes_))
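# The metric functions imported above can also give single aggregate numbers; macro
# averaging weights every gesture class equally regardless of its row count.
print(f'Accuracy : {accuracy_score(y_test, y_pred_classes):.3f}')
print(f'Precision: {precision_score(y_test, y_pred_classes, average="macro"):.3f}')
print(f'Recall   : {recall_score(y_test, y_pred_classes, average="macro"):.3f}')
print(f'F1 score : {f1_score(y_test, y_pred_classes, average="macro"):.3f}')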

# Display the confusion matrix
cm = confusion_matrix(y_test, y_pred_classes)
plt.figure(figsize=(10,10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(np.unique(y)))
plt.xticks(tick_marks, label_encoder.classes_, rotation=45)
plt.yticks(tick_marks, label_encoder.classes_)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
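# Persist the trained model for later inference (a sketch; the file name is an arbitrary choice).
model.save('touch_gesture_model.keras')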