#!/usr/bin/env python
# coding: utf-8

# Image Classification Hands-on - Test
# In[5]:
# **Data Loading**
#
# Run the cells below to load the data
# In[6]:
# load dataset (assuming the tf.keras API for Fashion-MNIST)
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical

(trainX, trainy), (testX, testy) = fashion_mnist.load_data()
# load train and test dataset
def load_dataset():
    # load dataset
    (trainX, trainy), (testX, testY) = fashion_mnist.load_data()
    # reshape dataset to have a single channel
    trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
    testX = testX.reshape((testX.shape[0], 28, 28, 1))
    # one hot encode target values
    trainy = to_categorical(trainy)
    testY = to_categorical(testY)
    return trainX, trainy, testX, testY
# **Subset Generation**
#
# - Perform a data split with **StratifiedShuffleSplit** with the following parameters
#     - test_size = 0.08
#     - random_state = seed
# - Perform a train/test split with **StratifiedShuffleSplit** with the following parameters
#     - test_size = 0.3
#     - random_state = seed
# In[8]:
from sklearn.model_selection import StratifiedShuffleSplit

seed = 9

# first split: carve out a stratified 8% subset of the training data
# (the loop body was missing in this export; the subset names below are assumptions)
data_split = StratifiedShuffleSplit(test_size=0.08, random_state=seed)
for train_index, test_index in data_split.split(trainX, trainy):
    split_data_92, split_data_8 = trainX[train_index], trainX[test_index]
    split_label_92, split_label_8 = trainy[train_index], trainy[test_index]
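
# The second split (test_size = 0.3) is missing from this export; a minimal
# sketch applied to the 8% subset produced above (intermediate names such as
# train_data_70 are assumptions; test_data_30 / test_label_30 appear in the
# original cell below)
train_test_split = StratifiedShuffleSplit(test_size=0.3, random_state=seed)
for train_index, test_index in train_test_split.split(split_data_8, split_label_8):
    train_data_70, test_data_30 = split_data_8[train_index], split_data_8[test_index]
    train_label_70, test_label_30 = split_label_8[train_index], split_label_8[test_index]

train_data = train_data_70
train_labels = train_label_70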
# In[10]:
test_data = test_data_30
test_labels = test_label_30
print('train_data : ',len(train_data))
print('train_labels : ',len(train_labels))
print('test_data : ',len(test_data))
print('test_labels : ',len(test_labels))
# **Normalization**
# - Compute the mean of **data** with the following parameters
#     - axis = (0,1,2)
#     - keepdims = True
# - Compute the standard deviation (square root of the variance, with **ddof**) of **data** with the following parameters
#     - axis = (0,1,2)
#     - ddof = 1
#     - keepdims = True
# - Print the shape of
#     - train_data
#     - test_data
# In[15]:
import numpy as np

# standardize the images using the mean / std described above (the function
# body was missing in this export and is reconstructed from that description)
def normalize(data):
    mean = np.mean(data, axis=(0, 1, 2), keepdims=True)
    std = np.std(data, axis=(0, 1, 2), ddof=1, keepdims=True)
    return (data - mean) / std

train_data = train_data.astype('float64')
test_data = test_data.astype('float64')

# calling the function
train_data = normalize(train_data)
test_data = normalize(test_data)

# prints the shape of train data and test data
print('train_data: ', train_data.shape)
print('test_data: ', test_data.shape)
# **ZCA Whitening**
#
# - Print the shape of
#     - train_data
#     - test_data
# In[19]:
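# train_data_flat / test_data_flat are used below but never defined in this
# export; a minimal sketch (assumption): flatten each 28x28 image into a
# 784-dimensional row vector
train_data_flat = train_data.reshape(train_data.shape[0], -1)
test_data_flat = test_data.reshape(test_data.shape[0], -1)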
print('train_data_flat: ', train_data_flat.shape)
print('test_data_flat: ', test_data_flat.shape)
train_data_flat_t = train_data_flat.T
test_data_flat_t = test_data_flat.T
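
# the ZCA whitening step itself is missing from this export; a minimal sketch,
# assuming whitening is applied to the transposed (features x samples) arrays
# above (the zca_whiten / *_zca names are assumptions)
def zca_whiten(X, eps=1e-5):
    # X has shape (n_features, n_samples): centre each feature, rotate into the
    # eigenbasis of the covariance, rescale by 1/sqrt(eigenvalue), rotate back
    X = X - X.mean(axis=1, keepdims=True)
    cov = np.cov(X)
    U, S, _ = np.linalg.svd(cov)
    W = U @ np.diag(1.0 / np.sqrt(S + eps)) @ U.T  # ZCA whitening matrix
    return W @ X

train_data_zca = zca_whiten(train_data_flat_t)
test_data_zca = zca_whiten(test_data_flat_t)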
# In[33]:
from sklearn.decomposition import PCA

train_data_pca = PCA(n_components=2).fit_transform(train_data_flat)
test_data_pca = PCA(n_components=2).fit_transform(test_data_flat)
print('train_data_pca: ', len(train_data_pca))
print('test_data_pca: ', len(test_data_pca))
train_data_pca = train_data_pca.T
test_data_pca = test_data_pca.T
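
# design note: the cell above fits a separate PCA on the test data, so the two
# projections use different bases; a common alternative (a sketch, the *_alt
# names are assumptions) reuses the PCA fitted on the training data
pca = PCA(n_components=2).fit(train_data_flat)
train_data_pca_alt = pca.transform(train_data_flat)
test_data_pca_alt = pca.transform(test_data_flat)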
# In[35]:
from skimage import color

# build one feature vector of singular values per image (the function header
# and loop were missing in this export)
def svdFeatures(input_data):
    svdArray_input_data = []
    size = input_data.shape[0]
    for i in range(size):
        img = input_data[i]
        # rgb2gray kept from the original code; only applied when a channel
        # axis is present, since the images here are already grayscale
        if img.ndim == 3:
            img = color.rgb2gray(img)
        U, s, V = np.linalg.svd(img, full_matrices=False)
        svdArray_input_data.append(s)
    svdMatrix_input_data = np.matrix(svdArray_input_data)
    return svdMatrix_input_data

train_data_svd = svdFeatures(train_data)
test_data_svd = svdFeatures(test_data)
print(train_data_svd.shape)
print(test_data_svd.shape)
# In[ ]:
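# `clf` is not defined anywhere in this export; a minimal sketch, assuming a
# scikit-learn classifier such as a linear SVM (the estimator actually used in
# the original notebook is unknown)
from sklearn.svm import SVC

clf = SVC(kernel='linear')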
# scikit-learn expects data of shape (n_samples, n_features), so the
# non-transposed flattened arrays are used here
train = clf.fit(train_data_flat, train_labels)
predicted = clf.predict(test_data_flat)
score = clf.score(test_data_flat, test_labels)
print("score",score)
# In[ ]: