Deep Learning Manual
We will look at several aspects of the image, so we need to import different libraries including NumPy, pandas, etc.
You can use any image from your system. Here an image named 'image.jpg' is used for feature extraction.
Here we can see that the coloured image contains rows, columns, and channels. Because it is a coloured image there are three channels (RGB), while grayscale pictures have only one channel.
So we can clearly identify coloured and grayscale images by their shapes.
4. Feature Extraction
Pixel Features
The number of pixel features in an image is the same as the size of the image. For grayscale images we can find the pixel features by reshaping the image and returning it as a one-dimensional array.
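A minimal sketch of this reshape step, assuming a grayscale image loaded with scikit-image (the file path matches the one used in the program below):

import numpy as np
from skimage.io import imread

# load the image as grayscale
image = imread("C:/python/image.jpg", as_gray=True)

# flatten the 2-D pixel grid into a 1-D feature vector: one feature per pixel
pixel_features = np.reshape(image, (image.shape[0] * image.shape[1]))
print(pixel_features.shape)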
PROGRAM
import pandas as pd
import numpy as np
from skimage.io import imread, imshow
%matplotlib inline

# read the image in grayscale and inspect its shape and pixel values
image = imread("C:/python/image.jpg", as_gray=True)
imshow(image)
print(image.shape)
print(image)

# read the same image in colour and inspect its shape
image = imread("C:/python/image.jpg")
imshow(image)
print(image.shape)
# Output: (524, 800, 3)

# pixel-feature matrix with the same number of rows and columns as the image
image = imread("C:/python/image.jpg")
feature_matrix_image = np.zeros((524, 800))
feature_matrix_image
feature_matrix_image.shape
RESULT:
Thus the program for feature extraction from image data was executed successfully.
Ex.No.2 PROGRAM FOR FEATURE EXTRACTION FROM VIDEO
AIM:
To write a program for feature extraction from video data.
ALGORITHM
We will look at several aspects of the video, so we need to import different libraries including NumPy, pandas, etc.
You can use any video from your system, stored for example as 'video.mp4', for which we will perform feature extraction.
Here we can see that each frame of the video contains rows, columns, and channels. Because the frames are coloured images there are three channels (RGB), while grayscale frames have only one channel, so we can clearly identify coloured and grayscale frames by their shapes.
4. Feature Extraction
Pixel Features
The number of pixel features in a video frame is the same as the size of the frame, so we can find the pixel features frame by frame.
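A minimal sketch of per-frame pixel-feature extraction using OpenCV (this sketch is illustrative and not part of the program below; the file name is assumed):

import cv2
import numpy as np

# open the video and read frames one at a time
cap = cv2.VideoCapture("C:/python/video.mp4")
frame_features = []
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # one channel per pixel
    frame_features.append(gray.reshape(-1))          # flatten to a 1-D feature vector
cap.release()
print(len(frame_features))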
PROGRAM
import os
import sys
import json
import subprocess
import numpy as np
import torch

# parse_opts, generate_model and classify_video are assumed to come from the
# accompanying opts.py, model.py and classify.py helper modules of this project;
# the option names (opt.input, opt.video_root, opt.output) are assumed to be set by parse_opts
from opts import parse_opts
from model import generate_model
from mean import get_mean
from classify import classify_video

if __name__ == "__main__":
    opt = parse_opts()
    opt.mean = get_mean()
    opt.sample_size = 112
    opt.sample_duration = 16
    opt.n_classes = 400

    # build the 3D CNN and load the pretrained weights
    model = generate_model(opt)
    model_data = torch.load(opt.model)
    model.load_state_dict(model_data['state_dict'])
    model.eval()
    if opt.verbose:
        print(model)

    # list of input videos, one file name per line
    input_files = []
    with open(opt.input) as f:
        for row in f:
            input_files.append(row[:-1])

    # human-readable class names
    class_names = []
    with open('class_names_list') as f:
        for row in f:
            class_names.append(row[:-1])

    ffmpeg_loglevel = 'quiet'
    if opt.verbose:
        ffmpeg_loglevel = 'info'

    if os.path.exists('tmp'):
        subprocess.call('rm -rf tmp', shell=True)

    outputs = []
    for input_file in input_files:
        video_path = os.path.join(opt.video_root, input_file)
        if os.path.exists(video_path):
            print(video_path)
            # extract the frames of the video into a temporary folder with ffmpeg
            subprocess.call('mkdir tmp', shell=True)
            subprocess.call('ffmpeg -i {} tmp/image_%05d.jpg'.format(video_path),
                            shell=True)
            # extract features / class scores for the extracted frames
            result = classify_video('tmp', input_file, class_names, model, opt)
            outputs.append(result)
            subprocess.call('rm -rf tmp', shell=True)
        else:
            print('{} does not exist'.format(video_path))

    if os.path.exists('tmp'):
        subprocess.call('rm -rf tmp', shell=True)

    # save the per-video results as JSON
    with open(opt.output, 'w') as f:
        json.dump(outputs, f)
RESULT:
Thus the program for feature extraction from video data was executed successfully.
Ex.No.3 PROGRAM FOR IMAGE CLASSIFICATION
AIM
To write a program for image classification using Python.
ALGORITHM
1. Import the necessary libraries required for performing CNN tasks.
2. A convolved image can be too large, so pooling is used to reduce it without losing important features or patterns.
3. Create the neural network by initialising it with the Sequential model from Keras.
4. Compile the model; here we are required to specify an optimizer (see the sketch after this list).
5. Set the data directory and generate the image data.
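A minimal sketch of the compile step with an explicitly chosen optimizer (illustrative values, not part of the original program):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.optimizers import Adam

# small CNN for 100x100 RGB inputs with a single sigmoid output
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid'),
])

# the optimizer, loss and metrics are specified at compile time
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=['accuracy'])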
PROGRAM
import numpy as np
import random
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# load the flattened image data and labels from CSV files
X_train = np.loadtxt("C:/python/input.csv", delimiter=',')
Y_train = np.loadtxt("C:/python/labels.csv", delimiter=',')
X_test = np.loadtxt("C:/python/input_test.csv", delimiter=',')
Y_test = np.loadtxt("C:/python/labels_test.csv", delimiter=',')

# reshape into 100x100 RGB images and scale pixel values to [0, 1]
X_train = X_train.reshape(len(X_train), 100, 100, 3)
Y_train = Y_train.reshape(len(Y_train), 1)
X_test = X_test.reshape(len(X_test), 100, 100, 3)
Y_test = Y_test.reshape(len(Y_test), 1)
X_train = X_train / 255.0
X_test = X_test / 255.0

# show a random training image
idx = random.randint(0, len(X_train) - 1)
plt.imshow(X_train[idx, :])
plt.show()
# the model can be defined in one go ...
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid'),
])

# ... or built layer by layer; both forms describe the same network
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# compile and train; the optimizer, epochs and batch size are illustrative values
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=5, batch_size=64)
# evaluate on the test set
model.evaluate(X_test, Y_test)

# pick a random test image and predict its class
idX2 = random.randint(0, len(Y_test) - 1)
plt.imshow(X_test[idX2, :])
plt.show()

Y_pred = model.predict(X_test[idX2, :].reshape(1, 100, 100, 3))
Y_pred = Y_pred > 0.5
if Y_pred[0][0] == 0:
    pred = 'dog'
else:
    pred = 'cat'
print("Our model says it is a:", pred)
RESULT:
Thus the program for image classification was executed successfully.
Ex.No.4 PROGRAM FOR IMAGE RECOGNITION
AIM
To write a program for image recognition
ALGORITHM
Train data: the training set contains 200 images each of cars and planes, i.e. 400 images in total.
Test data: the test set contains 50 images each of cars and planes, i.e. 100 images in total.
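The program below reads the images with Keras's flow_from_directory, which expects one sub-folder per class; the assumed layout is sketched here (folder names are illustrative):

v_data/
    train/
        cars/      (200 training images of cars)
        planes/    (200 training images of planes)
    test/
        cars/      (50 test images of cars)
        planes/    (50 test images of planes)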
PROGRAM
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import backend as K

img_width, img_height = 224, 224
train_data_dir = 'v_data/train'
validation_data_dir = 'v_data/test'
nb_train_samples = 400
nb_validation_samples = 100
epochs = 10
batch_size = 16

# the input shape depends on whether the backend stores channels first or last
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# three convolution + pooling blocks
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# augment the training images; the test images are only rescaled
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save_weights('model_saved.h5')
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image as keras_image

model = load_model('model_saved.h5')

# load a test image to classify; the path is illustrative
image = keras_image.load_img('v_data/test/planes/1.jpg', target_size=(224, 224))
img = np.array(image)
img = img.reshape(1, 224, 224, 3)
label = model.predict(img)
RESULT:
Thus the program for image recognition was executed successfully.
Ex.No.5 PROGRAM FOR VIDEO RECOGNITION
AIM
To write a program for video recognition
ALGORITHM
1. Loop over all frames in the video file (a minimal frame-loop sketch follows this list).
8. An empty output/ folder is the location where we'll store the video recognition results.
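A minimal sketch of step 1, looping over video frames with OpenCV (illustrative, not part of the program below; the file name is assumed):

import cv2

cap = cv2.VideoCapture("sample_video.mp4")
frame_number = 0
while True:
    ret, frame = cap.read()   # ret is False once the video is exhausted
    if not ret:
        break
    frame_number += 1
    # ... process the frame here ...
cap.release()
print("processed", frame_number, "frames")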
PROGRAM
import cv2
import face_recognition

# open the input video and create an output video with matching size and fps
input_movie = cv2.VideoCapture("sample_video.mp4")
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = input_movie.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_movie = cv2.VideoWriter('output.avi', fourcc, fps, (width, height))

# learn the face we want to recognise from a sample image
image = face_recognition.load_image_file("sample_image.jpeg")
face_encoding = face_recognition.face_encodings(image)[0]
known_faces = [
    face_encoding,
]

# Initialize variables
face_locations = []
face_encodings = []
face_names = []
frame_number = 0

while True:
    # grab a single frame of video; ret is False when the video ends
    ret, frame = input_movie.read()
    frame_number += 1
    if not ret:
        break

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # compare the current face against the known face(s)
        match = face_recognition.compare_faces(known_faces, face_encoding, tolerance=0.50)
        name = None
        if match[0]:
            name = "Known person"   # illustrative label for the known face
        face_names.append(name)

    # label the recognised faces in the frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        if not name:
            continue
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)

    # write the resulting frame to the output video file
    output_movie.write(frame)

# All done!
input_movie.release()
cv2.destroyAllWindows()
RESULT:
Thus the program for video recognition was executed successfully.
Ex.No.6 PROGRAM FOR TRANSFER LEARNING
AIM
To write a program for transfer learning
ALGORITHM
1. Load a pretrained MobileNetV2 image classifier from TensorFlow Hub as a Keras layer.
2. Read the input image, resize it to the classifier's input size, and scale its pixel values to [0, 1].
3. Run the classifier on the image and take the index of the highest score as the predicted label.
4. Map the predicted index to its ImageNet class name.
PROGRAM
import numpy as np
import cv2
import os
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
from tensorflow.keras.models import Sequential

# MobileNetV2 expects 224x224 RGB inputs
IMAGE_SHAPE = (224, 224)

# wrap the pretrained TF-Hub classifier as a Keras layer
classifier = tf.keras.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4",
                   input_shape=IMAGE_SHAPE+(3,))
])
gold_fish = Image.open("C:/python/goldfish.jpg").resize(IMAGE_SHAPE)
gold_fish
gold_fish = np.array(gold_fish)/255.0
gold_fish.shape
gold_fish[np.newaxis, ...]

# predict: the extra axis turns the single image into a batch of one
result = classifier.predict(gold_fish[np.newaxis, ...])
result.shape
predicted_label_index = np.argmax(result)
predicted_label_index

# map the index to its ImageNet class name; the label file name is illustrative
with open("ImageNetLabels.txt") as f:
    image_labels = f.read().splitlines()
image_labels[:5]
image_labels[predicted_label_index]
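The program above only runs the pretrained classifier. As an illustrative extension (not part of the original program), transfer learning typically reuses the feature-vector variant of the same Hub model as a frozen base and trains only a small head on new classes; a minimal sketch, assuming a hypothetical number of target classes:

import tensorflow as tf
import tensorflow_hub as hub

num_classes = 5  # hypothetical number of new classes

# frozen MobileNetV2 feature extractor + trainable classification head
feature_extractor = hub.KerasLayer(
    "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
    input_shape=(224, 224, 3), trainable=False)

model = tf.keras.Sequential([
    feature_extractor,
    tf.keras.layers.Dense(num_classes, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(...) would then be called on the new, resized and scaled images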
RESULT:
Thus the program for transfer learning was executed successfully.
Ex.No.7 FEATURE EXTRACTION FROM IMAGE DATA
AIM
To write a program for feature extraction from image data
ALGORITHM
We will look at several aspects of the image, so we need to import different libraries including NumPy, pandas, etc.
You can use any image from your system. Here an image named 'image.jpg' is used for feature extraction.
Here we can see that the coloured image contains rows, columns, and channels. Because it is a coloured image there are three channels (RGB), while grayscale pictures have only one channel.
So we can clearly identify coloured and grayscale images by their shapes.
4. Feature Extraction
Pixel Features
The number of pixel features in an image is the same as the size of the image. For grayscale images we can find the pixel features by reshaping the image and returning it as a one-dimensional array.
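For coloured images, a common variant (not shown in the program below) uses the mean of the three channel values at each pixel as the feature; a minimal sketch:

import numpy as np
from skimage.io import imread

# read the colour image
image = imread("C:/python/image.jpg")

# feature matrix: one value per pixel, the mean of the R, G and B channels
feature_matrix = image.mean(axis=2)
features = feature_matrix.reshape(-1)   # flatten to a 1-D feature vector
print(feature_matrix.shape, features.shape)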
PROGRAM
import pandas as pd
import numpy as np
from skimage.io import imread, imshow
from skimage.filters import prewitt_h, prewitt_v
%matplotlib inline

#reading the image in grayscale
image = imread("C:/python/image.jpg", as_gray=True)
imshow(image)
image.shape, image

# edge features from the horizontal and vertical Prewitt kernels
edges_prewitt_horizontal = prewitt_h(image)
edges_prewitt_vertical = prewitt_v(image)
imshow(edges_prewitt_vertical, cmap='gray')
RESULT:
Thus the program for feature extraction from image data was executed successfully.
Ex.No.8 PROGRAM FOR IMAGE COLORIZATION
AIM
To write a program for image colorization
ALGORITHM
1. Load the model and the convolution/kernel points
2. Read and preprocess the image
3. Generate model predictions using the L channel from our input image
4. Use the predicted ab channels together with the L channel to create the resulting image.
Lab colour has three channels: L, a, and b. Instead of plain pixel intensities, each channel has a different meaning:
L channel: light intensity
a channel: green-red encoding
b channel: blue-yellow encoding
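A minimal sketch of converting an image to Lab and splitting it into its three channels with OpenCV (illustrative, not part of the program below):

import cv2

# read a colour image and convert it from BGR to Lab
img = cv2.imread("images/img1.jpg")
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

# L: light intensity, a: green-red, b: blue-yellow
L, a, b = cv2.split(lab)
print(L.shape, a.shape, b.shape)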
PROGRAM
import numpy as np
import cv2
from cv2 import dnn

# model file paths; forward slashes avoid backslash-escape issues
proto_file = 'Model/colorization_deploy_v2.prototxt'
model_file = 'Model/colorization_release_v2.caffemodel'
hull_pts = 'Model/pts_in_hull.npy'
img_path = 'images/img1.jpg'

# load the network and the cluster centres
net = dnn.readNetFromCaffe(proto_file, model_file)
kernel = np.load(hull_pts)

#-----Reading and preprocessing image-----#
img = cv2.imread(img_path)
scaled = img.astype("float32") / 255.0
lab_img = cv2.cvtColor(scaled, cv2.COLOR_BGR2LAB)

# add the cluster centres as 1x1 convolution kernels
class8 = net.getLayerId("class8_ab")
conv8 = net.getLayerId("conv8_313_rh")
pts = kernel.transpose().reshape(2, 313, 1, 1)
net.getLayer(class8).blobs = [pts.astype("float32")]
net.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]

# resize to the network input size and take the L channel
resized = cv2.resize(lab_img, (224, 224))
L = cv2.split(resized)[0]
# mean subtraction
L -= 50

# predict the ab channels from the input L channel
net.setInput(cv2.dnn.blobFromImage(L))
ab_channel = net.forward()[0, :, :, :].transpose((1, 2, 0))
# resize the predicted ab volume back to the input image size
ab_channel = cv2.resize(ab_channel, (img.shape[1], img.shape[0]))

# Take the L channel from the image and join it with the predicted ab channels
L = cv2.split(lab_img)[0]
colorized = np.concatenate((L[:, :, np.newaxis], ab_channel), axis=2)
colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
colorized = np.clip(colorized, 0, 1)
# change the image to 0-255 range and convert it from float32 to int
colorized = (255 * colorized).astype("uint8")

# show the original and colourised images side by side
img = cv2.resize(img, (640, 640))
colorized = cv2.resize(colorized, (640, 640))
result = cv2.hconcat([img, colorized])
cv2.imshow("Grayscale -> Colour", result)
cv2.waitKey(0)
OUTPUT:
RESULT:
Thus the program for image colorization was executed successfully.
Ex.No.9 PROGRAM FOR TWITTER SENTIMENT ANALYSIS
AIM
To write a program for Twitter sentiment analysis
ALGORITHM
1. Sentiment analysis is the process of determining whether a piece of writing is positive, negative, or neutral.
2. Install Tweepy, the Python client for the official Twitter API (a minimal fetch sketch follows this list).
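A minimal sketch of fetching recent tweets with Tweepy (the credentials are placeholders and the search call assumes Tweepy 4.x's API.search_tweets; this sketch is not part of the program below):

import tweepy

# placeholder credentials; obtain real ones from the Twitter developer portal
auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
api = tweepy.API(auth)

# fetch a few recent tweets matching a query
for status in api.search_tweets(q="deep learning", count=5, lang="en"):
    print(status.text)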
PROGRAM
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from scipy.special import softmax

tweet = "Covid cases are increasing fast!"   # example tweet text (illustrative)

# preprocess tweet: mask user handles and links
tweet_words = []
for word in tweet.split(' '):
    if word.startswith('@') and len(word) > 1:
        word = '@user'
    elif word.startswith('http'):
        word = "http"
    tweet_words.append(word)
tweet_proc = " ".join(tweet_words)

# load the pretrained sentiment model and its tokenizer
roberta = "cardiffnlp/twitter-roberta-base-sentiment"
model = AutoModelForSequenceClassification.from_pretrained(roberta)
tokenizer = AutoTokenizer.from_pretrained(roberta)
labels = ['Negative', 'Neutral', 'Positive']

# sentiment analysis
encoded_tweet = tokenizer(tweet_proc, return_tensors='pt')
output = model(**encoded_tweet)
scores = output[0][0].detach().numpy()
scores = softmax(scores)

for i in range(len(scores)):
    l = labels[i]
    s = scores[i]
    print(l, s)
OUTPUT:
RESULT:
Thus the program for Twitter sentiment analysis was executed successfully.
Ex.No.10 PROGRAM FOR BACKPROPAGATION
AIM
To write a program for backpropagation with CNN.
ALGORITHM
1. Define the training inputs and targets and normalise them.
2. Randomly initialise the weights and biases of the hidden and output layers.
3. For each epoch, compute the forward pass through the network using the sigmoid activation.
4. Compute the output error, backpropagate it to the hidden layer, and update the weights using the learning rate.
PROGRAM
import numpy as np

# example training data (hours slept, hours studied) -> exam score; values are illustrative
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X / np.amax(X, axis=0)   # normalise inputs
y = y / 100                  # normalise targets

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def derivatives_sigmoid(x):
    # x is assumed to already be a sigmoid output
    return x * (1 - x)

epoch = 5
lr = 0.1
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

# random weight and bias initialisation
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward pass
    hlayer_act = sigmoid(np.dot(X, wh) + bh)
    output = sigmoid(np.dot(hlayer_act, wout) + bout)
    # backpropagate the output error
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    # weight updates
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr

print("Predicted output:\n", output)
RESULT:
Thus the program for backpropagation with CNN was executed successfully.
Ex.No.11 PROGRAM FOR DENOISING IMAGES USING AUTOENCODERS
AIM
To write a program for denoising images using autoencoders
ALGORITHM
1. Load the MNIST images, normalise them to [0, 1], and reshape them to 28x28x1.
2. Create noisy copies of the images by adding random noise.
3. Build a convolutional autoencoder (encoder and decoder) and compile it.
4. Train the autoencoder to reconstruct the images, then use it to predict denoised versions of the noisy test images.
PROGRAM
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model

def preprocess(array):
    # normalise to [0, 1] and reshape to 28x28x1
    array = array.astype("float32") / 255.0
    array = np.reshape(array, (len(array), 28, 28, 1))
    return array

def noise(array):
    # add random Gaussian noise and clip back to [0, 1]
    noise_factor = 0.4
    noisy_array = array + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=array.shape)
    return np.clip(noisy_array, 0.0, 1.0)

def display(array1, array2):
    # show ten random images from each array, one row per array
    n = 10
    indices = np.random.randint(len(array1), size=n)
    images1 = array1[indices, :]
    images2 = array2[indices, :]
    plt.figure(figsize=(20, 4))
    for i, (image1, image2) in enumerate(zip(images1, images2)):
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(image1.reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(image2.reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()

# load MNIST (labels are not needed), normalise, and create noisy copies
(train_data, _), (test_data, _) = mnist.load_data()
train_data = preprocess(train_data)
test_data = preprocess(test_data)
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
display(train_data, noisy_train_data)

input = layers.Input(shape=(28, 28, 1))

# Encoder
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)

# Decoder
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)

# Autoencoder
autoencoder = Model(input, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()

autoencoder.fit(
    x=train_data,
    y=train_data,
    epochs=50,
    batch_size=128,
    shuffle=True,
    validation_data=(test_data, test_data),
)

predictions = autoencoder.predict(noisy_test_data)
display(noisy_test_data, predictions)
RESULT:
Thus the program for denoising image using autoencoders was executed successfully.