In [1]:
import sklearn
import pickle
import numpy as np
import torch
from ClassificationModels.CNN_T import ResNetBaseline, get_all_preds, fit, UCRDataset
from ClassificationModels.LSTM_T import LSTM
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import pandas as pd
import os
from tslearn.datasets import UCR_UEA_datasets
/home/jacqueline/.local/share/virtualenvs/TSInterpret-x4eqnPOt/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm
Parameters
In [2]:
#dataset='GunPoint'
dataset='BasicMotions'
Load Data
In [3]:
train_x, train_y, test_x, test_y = UCR_UEA_datasets().load_dataset(dataset)
In [4]:
enc1 = sklearn.preprocessing.OneHotEncoder(sparse_output=False).fit(np.vstack((train_y.reshape(-1,1), test_y.reshape(-1,1))))
pickle.dump(enc1, open(f'../../ClassificationModels/models/{dataset}/OneHotEncoder.pkl', 'wb'))
train_y = enc1.transform(train_y.reshape(-1,1))
test_y = enc1.transform(test_y.reshape(-1,1))
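The fitted encoder can also map one-hot vectors back to the original string labels, which is handy when inspecting predictions later. A minimal illustrative snippet (not part of the original notebook):

# Illustrative only: list the class names and recover labels from the one-hot representation.
print(enc1.categories_)
print(enc1.inverse_transform(train_y[:3]))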
Model Training
In [5]:
n_pred_classes = train_y.shape[1]
train_dataset = UCRDataset(train_x.astype(np.float64), train_y.astype(np.int64))
test_dataset = UCRDataset(test_x.astype(np.float64), test_y.astype(np.int64))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
device = 'cpu'
hidden_size = 10
rnn = 0.1
model = LSTM(6, hidden_size, n_pred_classes, rnndropout=rnn).to(device)
#fit(model,train_loader,test_loader)
# Make sure the output folder for this dataset exists.
if dataset in os.listdir('../../ClassificationModels/models/'):
    print('Folder exists')
else:
    os.mkdir(f'../../ClassificationModels/models/{dataset}')
#torch.save(model.state_dict(), f'../../ClassificationModels/models/{dataset}/LSTM')
# Load the pretrained weights and evaluate on the test set.
model.load_state_dict(torch.load(f'../../ClassificationModels/models/{dataset}/LSTM'))
model.eval()
test_preds, ground_truth = get_all_preds(model, test_loader)
ground_truth = np.argmax(ground_truth, axis=1)
# Confusion matrix, accuracy, and classification report for the loaded model.
sns.set(rc={'figure.figsize': (5, 4)})
heatmap = confusion_matrix(ground_truth, test_preds)
sns.heatmap(heatmap, annot=True)
plt.savefig(f'../../ClassificationModels/models/{dataset}/LSTM_confusion_matrix.png')
plt.close()
acc = accuracy_score(ground_truth, test_preds)
a = classification_report(ground_truth, test_preds, output_dict=True)
dataframe = pd.DataFrame.from_dict(a)
dataframe.to_csv(f'../../ClassificationModels/models/{dataset}/LSTMclassification_report.csv', index=False)
Folder exists
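If the pretrained checkpoint is missing, the model can be trained and saved first; this is what the commented-out fit and torch.save calls in the cell above are for. A minimal sketch using the same helpers and paths:

# Only needed when no checkpoint exists yet: train the LSTM and persist its weights.
fit(model, train_loader, test_loader)
torch.save(model.state_dict(), f'../../ClassificationModels/models/{dataset}/LSTM')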
Explanation Algorithm
In [6]:
item = test_x[0].reshape(1, -1, 6)
shape = item.shape
_item = torch.from_numpy(item).float()
model.eval()
y_target = model(_item).detach().numpy()
#y_target = torch.nn.functional.softmax(model(_item)).detach().numpy()
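For reference, the class index the model assigns to this sample can be read off y_target; the labels argument passed to explain below selects the class for which attributions are computed. A small illustrative check (not in the original cell):

# Illustrative: class index with the highest score for this sample.
print(int(np.argmax(y_target)))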
In [7]:
from TSInterpret.InterpretabilityModels.Saliency.SaliencyMethods_PTY import Saliency_PTY
int_mod = Saliency_PTY(model, NumTimeSteps=train_x.shape[-2], NumFeatures=train_x.shape[-1], method='FA', mode='time')
2023-09-18 13:59:41.345000: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-09-18 13:59:42.368222: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
In [8]:
print(np.array([test_x[0,:,:]]).shape)
(1, 100, 6)
In [9]:
exp = int_mod.explain(np.array([test_x[0,:,:]]), labels=1, TSR=True)
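The returned explanation is an attribution map aligned with the input sample. A quick sanity check of its shape, assuming exp comes back as an array-like with one score per time step and feature:

# Illustrative: attribution scores should match the (time steps, features) layout of the input.
print(np.array(exp).shape)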
In [10]:
int_mod.plot(np.array([test_x[0,:,:]]), exp)
time mode
In [11]:
int_mod.plot(np.array([test_x[0,:,:]]), exp, heatmap=True)
time mode