TSR Univariate
In [1]:
Copied!
from tslearn.datasets import UCR_UEA_datasets
import numpy as np
import torch
import seaborn as sns
import sklearn
from ClassificationModels.LSTM_T import LSTM
from ClassificationModels.CNN_T import ResNetBaseline, get_all_preds, fit, UCRDataset
from tslearn.datasets import UCR_UEA_datasets
import numpy as np
import torch
import seaborn as sns
import sklearn
from ClassificationModels.LSTM_T import LSTM
from ClassificationModels.CNN_T import ResNetBaseline, get_all_preds, fit, UCRDataset
/home/jacqueline/.local/share/virtualenvs/TSInterpret-x4eqnPOt/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm
In [2]:
Copied!
#Load Data
dataset='GunPoint'
X_train,y_train, X_test, y_test=UCR_UEA_datasets().load_dataset(dataset)
train_x=X_train.reshape(-1,X_train.shape[-2],X_train.shape[-1])
test_x=X_test.reshape(-1,X_train.shape[-2],X_train.shape[-1])
train_y = y_train
test_y=y_test
enc1=sklearn.preprocessing.OneHotEncoder(sparse=False).fit(np.vstack((train_y.reshape(-1,1),test_y.reshape(-1,1))))
#pickle.dump(enc1,open(f'../../ClassificationModels/models/{dataset}/OneHotEncoder.pkl','wb'))
train_y=enc1.transform(train_y.reshape(-1,1))
test_y=enc1.transform(test_y.reshape(-1,1))
print(train_y)
#Load Model
n_pred_classes =len(np.unique(train_y))
train_dataset = UCRDataset(train_x.astype(np.float64),train_y.astype(np.int64))
test_dataset = UCRDataset(test_x.astype(np.float64),test_y.astype(np.int64))
train_loader = torch.utils.data.DataLoader(train_dataset,batch_size=16,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)
#Load Data
dataset='GunPoint'
X_train,y_train, X_test, y_test=UCR_UEA_datasets().load_dataset(dataset)
train_x=X_train.reshape(-1,X_train.shape[-2],X_train.shape[-1])
test_x=X_test.reshape(-1,X_train.shape[-2],X_train.shape[-1])
train_y = y_train
test_y=y_test
enc1=sklearn.preprocessing.OneHotEncoder(sparse=False).fit(np.vstack((train_y.reshape(-1,1),test_y.reshape(-1,1))))
#pickle.dump(enc1,open(f'../../ClassificationModels/models/{dataset}/OneHotEncoder.pkl','wb'))
train_y=enc1.transform(train_y.reshape(-1,1))
test_y=enc1.transform(test_y.reshape(-1,1))
print(train_y)
#Load Model
n_pred_classes =len(np.unique(train_y))
train_dataset = UCRDataset(train_x.astype(np.float64),train_y.astype(np.int64))
test_dataset = UCRDataset(test_x.astype(np.float64),test_y.astype(np.int64))
train_loader = torch.utils.data.DataLoader(train_dataset,batch_size=16,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)
[[0. 1.] [0. 1.] [1. 0.] [1. 0.] [0. 1.] [0. 1.] [0. 1.] [0. 1.] [0. 1.] [1. 0.] [1. 0.] [1. 0.] [1. 0.] [1. 0.] [0. 1.] [1. 0.] [0. 1.] [0. 1.] [1. 0.] [0. 1.] [1. 0.] [1. 0.] [1. 0.] [0. 1.] [1. 0.] [0. 1.] [1. 0.] [1. 0.] [0. 1.] [1. 0.] [1. 0.] [0. 1.] [0. 1.] [1. 0.] [0. 1.] [1. 0.] [0. 1.] [0. 1.] [0. 1.] [0. 1.] [0. 1.] [1. 0.] [1. 0.] [1. 0.] [0. 1.] [0. 1.] [1. 0.] [0. 1.] [1. 0.] [0. 1.]]
/home/jacqueline/.local/share/virtualenvs/TSInterpret-x4eqnPOt/lib/python3.9/site-packages/sklearn/preprocessing/_encoders.py:972: FutureWarning: `sparse` was renamed to `sparse_output` in version 1.2 and will be removed in 1.4. `sparse_output` is ignored unless you leave `sparse` to its default value. warnings.warn(
In [3]:
Copied!
model = LSTM(1, 10 ,2 ,0.1)#ResNetBaseline(in_channels=1, num_pred_classes=n_pred_classes)
#fit(model,train_loader,test_loader)
model.load_state_dict(torch.load(f'../../ClassificationModels/models/{dataset}/LSTM'))
model.eval()
model = LSTM(1, 10 ,2 ,0.1)#ResNetBaseline(in_channels=1, num_pred_classes=n_pred_classes)
#fit(model,train_loader,test_loader)
model.load_state_dict(torch.load(f'../../ClassificationModels/models/{dataset}/LSTM'))
model.eval()
Out[3]:
LSTM( (drop): Dropout(p=0.1, inplace=False) (fc): Linear(in_features=10, out_features=2, bias=True) (rnn): LSTM(1, 10, batch_first=True) )
In [4]:
Copied!
item = np.array([test_x[0,:,:]],dtype=np.float64)
label =0
item = np.array([test_x[0,:,:]],dtype=np.float64)
label =0
In [5]:
Copied!
#TODO SLIDING WINDOW
from TSInterpret.InterpretabilityModels.Saliency.TSR import TSR
int_mod=TSR(model, train_x.shape[-2],train_x.shape[-1], method='FO', mode='time')
print(item.shape)
exp= int_mod.explain(item,labels=label,TSR =True)
#TODO SLIDING WINDOW
from TSInterpret.InterpretabilityModels.Saliency.TSR import TSR
int_mod=TSR(model, train_x.shape[-2],train_x.shape[-1], method='FO', mode='time')
print(item.shape)
exp= int_mod.explain(item,labels=label,TSR =True)
2023-09-18 12:55:40.118252: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-09-18 12:55:41.436679: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
(1, 150, 1)
In [6]:
Copied!
print(exp.shape)
print(np.array([test_x[0,:,:]]).shape)
int_mod.plot(np.array([test_x[0,:,:]]),exp, figsize=(30,30))
print(exp.shape)
print(np.array([test_x[0,:,:]]).shape)
int_mod.plot(np.array([test_x[0,:,:]]),exp, figsize=(30,30))
(150, 1) (1, 150, 1) time mode