24MAI008
Practical- 1
Leeon John
SLP (Single Layer Perceptron) for AND gate
import numpy as np
import matplotlib.pyplot as plt

# AND gate input and output
input_data = np.array([[0, 0],
                       [0, 1],
                       [1, 0],
                       [1, 1]])
output_data = np.array([0, 0, 0, 1])

# Learning rate and number of iterations
learning_rate = 0.01
iterations = 50

class SingleLayerPerceptron:
    def __init__(self, learning_rate, iterations):
        self.learning_rate = learning_rate
        self.iterations = iterations

    def train(self, input_data, output_data):
        self.weights = np.zeros(input_data.shape[1])
        self.loss_history = []     # Track loss for each iteration
        self.weight1_history = []  # Track weight1 changes
        self.weight2_history = []  # Track weight2 changes
        self.bias = 0
        for i in range(self.iterations):
            total_error = 0
            for j in range(input_data.shape[0]):
                predicted = self.step_function(np.dot(self.weights, input_data[j]) + self.bias)
                error = output_data[j] - predicted
                self.weights += self.learning_rate * error * input_data[j]
                self.bias += self.learning_rate * error
                total_error += error ** 2
            self.weight1_history.append(self.weights[0])
            self.weight2_history.append(self.weights[1])
            self.loss_history.append(total_error)
        print(f"Trained Weights: {self.weights}, Bias: {self.bias}")

    def step_function(self, activation_value):
        return 1 if activation_value >= 0 else 0

    def predict(self, input_data):
        linear_output = np.dot(input_data, self.weights) + self.bias
        return np.array([self.step_function(value) for value in linear_output])
# Create and train the perceptron
perceptron = SingleLayerPerceptron(learning_rate=learning_rate, iterations=iterations)
perceptron.train(input_data, output_data)
# Predictions on input data
predictions = perceptron.predict(input_data)
print("Predicted Outputs:", predictions)
# Plot loss vs iterations
plt.plot(range(iterations), perceptron.loss_history)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('Loss over Iterations')
plt.show()
# Plot weight changes over iterations
plt.plot(range(iterations), perceptron.weight1_history, label='Weight 1')
plt.plot(range(iterations), perceptron.weight2_history, label='Weight 2')
plt.xlabel('Iterations')
plt.ylabel('Weights')
plt.title('Weight Changes vs Iterations')
plt.legend()
plt.show()
OUTPUT
Optimized Weights are [0.02 0.01] and bias is -0.03
Predictions: [0 0 0 1]
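As a quick sanity check, plugging the reported parameters back into the same decision rule reproduces the AND truth table (a minimal verification sketch; the weight and bias values are the ones reported above for this particular run):

# Verification sketch using the values reported in the output above
w = np.array([0.02, 0.01])   # trained weights from this run
b = -0.03                    # trained bias from this run
for x in input_data:
    print(x, 1 if np.dot(w, x) + b >= 0 else 0)
# [0 0] -> 0, [0 1] -> 0, [1 0] -> 0, [1 1] -> 1, since 0.02 + 0.01 - 0.03 = 0 and the step fires at >= 0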
Practical- 2
MLP (Multilayer Perceptron) for XOR gate
import numpy as np

# Activation functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

def mse_loss(y_true, y_pred):
    return np.mean((y_true - y_pred) ** 2)

# Training the MLP
class MLP:
    def __init__(self, input_size, hidden_size, output_size):
        # Initialize weights
        self.weights_input_hidden = np.random.rand(input_size, hidden_size)
        self.weights_hidden_output = np.random.rand(hidden_size, output_size)
        # Initialize biases
        self.bias_hidden = np.zeros((1, hidden_size))
        self.bias_output = np.zeros((1, output_size))
        self.weight1_history = []
        self.weight2_history = []

    def forward(self, X):
        # Forward pass
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = sigmoid(self.hidden_input)
        self.output_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        output = sigmoid(self.output_input)
        return output

    def backward(self, X, y, output, learning_rate):
        # Backward pass
        output_error = y - output
        output_delta = output_error * sigmoid_derivative(output)
        hidden_error = output_delta.dot(self.weights_hidden_output.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden_output)
        # Update weights and biases
        self.weights_hidden_output += self.hidden_output.T.dot(output_delta) * learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += X.T.dot(hidden_delta) * learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        loss_history = []
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output, learning_rate)
            loss = mse_loss(y, output)
            loss_history.append(loss)
            self.weight1_history.append(self.weights_input_hidden.copy())
            self.weight2_history.append(self.weights_hidden_output.copy())
            # Calculate and print loss
            if epoch % 100 == 0:
                print(f'Epoch {epoch}, Loss: {loss}')
        return loss_history

    def predict(self, X):
        return self.forward(X)
import matplotlib.pyplot as plt
# Data
X = np.array([[1, 1], [1, 0], [0, 0], [0, 1]])
y = np.array([[0], [1], [0], [1]])  # XOR problem
# Initialize MLP
mlp = MLP(input_size=2, hidden_size=2, output_size=1)
# Train the MLP
loss_history=mlp.train(X, y, epochs=3000, learning_rate=0.3)
plt.plot(loss_history)
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
#weights graph
plt.plot(np.array(mlp.weight1_history)[:, 0, 0], label='w_input_hidden[0,0]')
plt.plot(np.array(mlp.weight2_history)[:, 1, 0], label='w_hidden_output[1,0]')
plt.xlabel('Epochs')
plt.ylabel('Weights')
plt.title('Selected Weights over Epochs')
plt.legend()
plt.show()
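Because predict returns the raw sigmoid activations rather than class labels, a small follow-up (a sketch reusing the mlp, X and y objects defined above) thresholds the outputs at 0.5 to recover the XOR truth table:

# Threshold the continuous MLP outputs to obtain hard 0/1 predictions
raw = mlp.predict(X)
labels = (raw > 0.5).astype(int)
print("Raw outputs:", raw.ravel())
print("Thresholded predictions:", labels.ravel(), "Targets:", y.ravel())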
OUTPUT
Epoch 0, Loss: 0.28632622071332564
Epoch 100, Loss: 0.24974752028060332
Epoch 200, Loss: 0.24954362571537672
Epoch 300, Loss: 0.24915385359965914
Epoch 400, Loss: 0.24835854954383405
Epoch 500, Loss: 0.24670177828064332
Epoch 600, Loss: 0.2434117690997297
Epoch 700, Loss: 0.23754003815062819
Epoch 800, Loss: 0.2283676599928358
Epoch 900, Loss: 0.21650088197755019
Epoch 1000, Loss: 0.2045387935814618
Epoch 1100, Loss: 0.1943547772280852
Epoch 1200, Loss: 0.18510837144882797
Epoch 1300, Loss: 0.17493306316427826
Epoch 1400, Loss: 0.16170925796080327
Epoch 1500, Loss: 0.14006196107528057
Epoch 1600, Loss: 0.10550061404982289
Epoch 1700, Loss: 0.06875643533457791
Epoch 1800, Loss: 0.04363030905454709
Epoch 1900, Loss: 0.029377588176241264
Epoch 2000, Loss: 0.02122622506953424
Epoch 2100, Loss: 0.016243675931611926
Epoch 2200, Loss: 0.012982798751198529
Epoch 2300, Loss: 0.010722487336864194
Epoch 2400, Loss: 0.009081531863586016
Epoch 2500, Loss: 0.007845123366759807
Epoch 2600, Loss: 0.006885039831125308
Epoch 2700, Loss: 0.0061208524983709205
Epoch 2800, Loss: 0.0054999369612976174
Epoch 2900, Loss: 0.00498660116621375
Practical- 3
TO UNDERSTAND THE ARCHITECTURE OF CNN
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Preprocess the data
train_images = train_images.reshape((train_images.shape[0], 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((test_images.shape[0], 28, 28, 1)).astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# Build the CNN model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# Add a fully connected layer
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Show the model summary
model.summary()
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train the model and save training history
history = model.fit(train_images, train_labels, epochs=5, batch_size=64, validation_data=(test_images, test_labels))
# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"Test accuracy: {test_acc}")
# Plot training and validation accuracy and loss
def plot_history(history):
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    # Plot training and validation accuracy
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    # Plot training and validation loss
    plt.subplot(1, 2, 2)
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
# Call the plot function
plot_history(history)
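Once trained, the softmax output can be turned into a digit prediction with argmax. A minimal sketch (reusing the preprocessed test set above; not part of the recorded run) for the first test image:

import numpy as np
# Predict the class of one test image and compare it with the one-hot label
probs = model.predict(test_images[:1])          # shape (1, 10) softmax probabilities
print("Predicted digit:", np.argmax(probs[0]))
print("True digit:", np.argmax(test_labels[0]))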
OUTPUT
Epoch 1/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 20s 10ms/step - accuracy: 0.8933 - loss: 0.3357 - val_accuracy: 0.9827 - val_loss: 0.0542
Epoch 2/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 17s 9ms/step - accuracy: 0.9853 - loss: 0.0484 - val_accuracy: 0.9883 - val_loss: 0.0397
Epoch 3/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 17s 9ms/step - accuracy: 0.9902 - loss: 0.0330 - val_accuracy: 0.9889 - val_loss: 0.0385
Epoch 4/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 19s 10ms/step - accuracy: 0.9923 - loss: 0.0243 - val_accuracy: 0.9897 - val_loss: 0.0387
Epoch 5/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 17s 9ms/step - accuracy: 0.9942 - loss: 0.0185 - val_accuracy: 0.9876 - val_loss: 0.0407
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9861 - loss: 0.0462
Test accuracy: 0.9876
Practical- 4
COMPARING DIFFERENT OPTIMIZERS FOR CNN
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Preprocess the data
train_images = train_images.reshape((train_images.shape[0], 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((test_images.shape[0], 28, 28, 1)).astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# Build the CNN model function
def build_model():
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    return model
# Compile, train and evaluate the model with different optimizers
def train_and_evaluate(optimizer_name, optimizer):
    model = build_model()
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print(f"\nTraining with {optimizer_name} optimizer")
    history = model.fit(train_images, train_labels, epochs=5, batch_size=64,
                        validation_data=(test_images, test_labels), verbose=1)
    test_loss, test_acc = model.evaluate(test_images, test_labels)
    print(f"{optimizer_name} Test accuracy: {test_acc}")
    return history
# Train with Adam, SGD, and RMSprop
history_adam = train_and_evaluate("Adam", tf.keras.optimizers.Adam())
history_sgd = train_and_evaluate("SGD", tf.keras.optimizers.SGD())
history_rmsprop = train_and_evaluate("RMSprop", tf.keras.optimizers.RMSprop())
# Plotting the results
def plot_comparison(history_adam, history_sgd, history_rmsprop):
    epochs = range(1, 6)
    plt.figure(figsize=(12, 6))
    # Accuracy comparison
    plt.subplot(1, 2, 1)
    plt.plot(epochs, history_adam.history['accuracy'], 'r-o', label='Adam - Train Acc', markersize=5)
    plt.plot(epochs, history_sgd.history['accuracy'], 'g-s', label='SGD - Train Acc', markersize=5)
    plt.plot(epochs, history_rmsprop.history['accuracy'], 'b-^', label='RMSprop - Train Acc', markersize=5)
    plt.title('Training Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    # Zoom in on the y-axis for better visibility of differences
    plt.ylim(0.95, 1.00)  # Adjust the y-axis limits to zoom in
    # Loss comparison
    plt.subplot(1, 2, 2)
    plt.plot(epochs, history_adam.history['loss'], 'r-o', label='Adam - Train Loss', markersize=5)
    plt.plot(epochs, history_sgd.history['loss'], 'g-s', label='SGD - Train Loss', markersize=5)
    plt.plot(epochs, history_rmsprop.history['loss'], 'b-^', label='RMSprop - Train Loss', markersize=5)
    plt.title('Training Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    # Zoom in on the y-axis for better visibility of differences
    plt.ylim(0.0, 0.3)  # Adjust the y-axis limits to zoom in
    plt.tight_layout()
    plt.show()
# Call the plot function
plot_comparison(history_adam, history_sgd, history_rmsprop)
build_model().summary()  # print the shared architecture once (model is otherwise local to train_and_evaluate)
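Because train_and_evaluate accepts any Keras optimizer instance, the comparison can be extended with one extra call. For example (a hedged sketch, not part of the recorded runs), SGD with momentum could be added alongside the three optimizers above:

# Optional extra run: SGD with momentum, reusing the same helper and model builder
history_sgd_momentum = train_and_evaluate(
    "SGD+momentum", tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9))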
OUTPUT
Training with Adam optimizer
Epoch 1/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 13s 13ms/step - accuracy: 0.8581 - loss: 0.4428 - val_accuracy: 0.9786 - val_loss: 0.0631
Epoch 2/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 12ms/step - accuracy: 0.9826 - loss: 0.0550 - val_accuracy: 0.9833 - val_loss: 0.0454
Epoch 3/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.9879 - loss: 0.0379 - val_accuracy: 0.9884 - val_loss: 0.0335
Epoch 4/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.9916 - loss: 0.0274 - val_accuracy: 0.9893 - val_loss: 0.0312
Epoch 5/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.9930 - loss: 0.0228 - val_accuracy: 0.9904 - val_loss: 0.0280
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9879 - loss: 0.0338
Adam Test accuracy: 0.9904000163078308
Training with SGD optimizer
Epoch 1/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.4697 - loss: 1.6572 - val_accuracy: 0.9218 - val_loss: 0.2496
Epoch 2/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9293 - loss: 0.2341 - val_accuracy: 0.9557 - val_loss: 0.1433
Epoch 3/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9552 - loss: 0.1464 - val_accuracy: 0.9676 - val_loss: 0.1042
Epoch 4/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9640 - loss: 0.1114 - val_accuracy: 0.9737 - val_loss: 0.0895
Epoch 5/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 13s 13ms/step - accuracy: 0.9706 - loss: 0.0952 - val_accuracy: 0.9795 - val_loss: 0.0686
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9751 - loss: 0.0805
SGD Test accuracy: 0.9794999957084656
Training with RMSprop optimizer
Epoch 1/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 13s 13ms/step - accuracy: 0.8606 - loss: 0.4213 - val_accuracy: 0.9858 - val_loss: 0.0428
Epoch 2/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9837 - loss: 0.0516 - val_accuracy: 0.9837 - val_loss: 0.0484
Epoch 3/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9896 - loss: 0.0322 - val_accuracy: 0.9891 - val_loss: 0.0362
Epoch 4/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.9926 - loss: 0.0237 - val_accuracy: 0.9893 - val_loss: 0.0355
Epoch 5/5
938/938 ━━━━━━━━━━━━━━━━━━━━ 12s 12ms/step - accuracy: 0.9952 - loss: 0.0170 - val_accuracy: 0.9920 - val_loss: 0.0296
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9893 - loss: 0.0394
RMSprop Test accuracy: 0.9919999837875366
Practical- 5
COMPARING DIFFERENT PRE-TRAINED CNN MODELS (TRANSFER LEARNING)
import kagglehub
import os
path = kagglehub.dataset_download("toaharahmanratul/lag-dataset")
print("Path to dataset files:", path)
dataset_path = path
files = os.listdir(path)
print("Files in dataset:", files)
train_glaucoma_image = os.path.join(dataset_path, 'LAG/train/glaucoma/image')
train_glaucoma_att_image=os.path.join(dataset_path, 'LAG/train/glaucoma/attention_map')
train_non_glaucoma_image = os.path.join(dataset_path, 'LAG/train/non_glaucoma/image')
train_non_glaucoma_att_image=os.path.join(dataset_path, 'LAG/train/non_glaucoma/attention_map')
test_glaucoma_image = os.path.join(dataset_path, 'LAG/test/glaucoma/image')
test_glaucoma_att_image=os.path.join(dataset_path, 'LAG/test/glaucoma/attention_map')
test_non_glaucoma_image = os.path.join(dataset_path, 'LAG/test/non_glaucoma/image')
test_non_glaucoma_att_image = os.path.join(dataset_path, 'LAG/test/non_glaucoma/attention_map')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
glaucoma_images = os.listdir(train_glaucoma_image)
first_glaucoma_image = glaucoma_images[0]
glaucoma_image_path = os.path.join(train_glaucoma_image, first_glaucoma_image)
glaucoma_att_images = os.listdir(train_glaucoma_att_image)
first_glaucoma_att_image = glaucoma_att_images[0]
glaucoma_att_image_path = os.path.join(train_glaucoma_att_image, first_glaucoma_att_image)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
img_glaucoma = mpimg.imread(glaucoma_image_path)
plt.imshow(img_glaucoma)
plt.title('First Glaucoma Image')
plt.axis('off')
plt.subplot(1, 2, 2)
img_glaucoma = mpimg.imread(glaucoma_att_image_path)
plt.imshow(img_glaucoma)
plt.title('First Glaucoma attention Image')
plt.axis('off')
plt.show()
non_glaucoma_images = os.listdir(train_non_glaucoma_image)
first_non_glaucoma_image = non_glaucoma_images[0]
non_glaucoma_image_path = os.path.join(train_non_glaucoma_image, first_non_glaucoma_image)
non_glaucoma_att_images = os.listdir(train_non_glaucoma_att_image)
first_non_glaucoma_att_image = non_glaucoma_att_images[0]
non_glaucoma_att_image_path = os.path.join(train_non_glaucoma_att_image, first_non_glaucoma_att_image)
plt.figure(figsize=(10, 5))
# Show the Non-Glaucoma image
plt.subplot(1, 2, 1)
img_non_glaucoma = mpimg.imread(non_glaucoma_image_path)
plt.imshow(img_non_glaucoma)
plt.title('First Non-Glaucoma Image')
plt.axis('off')
plt.subplot(1, 2, 2)
img_non_glaucoma = mpimg.imread(non_glaucoma_att_image_path)
plt.imshow(img_non_glaucoma)
plt.title('First Non-Glaucoma Attention Image')
plt.axis('off')
plt.show()
import tensorflow as tf
from tensorflow.keras.applications import (
VGG16, VGG19, ResNet50, ResNet101, ResNet152, InceptionV3)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_dir = os.path.join(dataset_path, 'LAG/train')
image_size = (224, 224)
batch_size = 32
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
# Flow from directory for training images from both glaucoma and non-glaucoma
train_generator = datagen.flow_from_directory(
train_dir,
target_size=image_size,
batch_size=batch_size,
class_mode='binary',
subset='training' )
# Flow from directory for validation images from both glaucoma and non-glaucoma
validation_generator = datagen.flow_from_directory(
train_dir,
target_size=image_size,
batch_size=batch_size,
class_mode='binary',
subset='validation' )
def create_model(base_model, lr=0.001):
    model = models.Sequential()
    model.add(base_model)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=Adam(learning_rate=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Create VGG16 model
vgg16 = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
vgg16_model = create_model(vgg16)
history_vgg16 = vgg16_model.fit(train_generator, epochs=4, validation_data=validation_generator, verbose=1)
vgg16_training_accuracy = history_vgg16.history['accuracy'][-1]
vgg16_val_accuracy = max(history_vgg16.history['val_accuracy'])
print(f"VGG16: Training Accuracy: {vgg16_training_accuracy * 100:.2f}%, Validation Accuracy:
{vgg16_val_accuracy * 100:.2f}%")
#googlenet
googlenet = InceptionV3(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
googlenet_model = create_model(googlenet)
history_googlenet = googlenet_model.fit(train_generator, epochs=4, validation_data=validation_generator,
verbose=1)
googlenet_training_accuracy = history_googlenet.history['accuracy'][-1]
googlenet_val_accuracy = max(history_googlenet.history['val_accuracy'])
print(f"GoogleNet: Training Accuracy: {googlenet_training_accuracy * 100:.2f}%, Validation Accuracy:
{googlenet_val_accuracy * 100:.2f}%")
# ResNet50
resnet50 = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
resnet50_model = create_model(resnet50)
history_resnet50 = resnet50_model.fit(train_generator, epochs=4, validation_data=validation_generator,
verbose=1)
resnet50_training_accuracy = history_resnet50.history['accuracy'][-1]
resnet50_val_accuracy = max(history_resnet50.history['val_accuracy'])
print(f"ResNet50: Training Accuracy: {resnet50_training_accuracy * 100:.2f}%, Validation Accuracy:
{resnet50_val_accuracy * 100:.2f}%")
# ResNet101
resnet101 = ResNet101(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
resnet101_model = create_model(resnet101)
history_resnet101 = resnet101_model.fit(train_generator, epochs=4, validation_data=validation_generator,
verbose=1)
resnet101_training_accuracy = history_resnet101.history['accuracy'][-1]
resnet101_val_accuracy = max(history_resnet101.history['val_accuracy'])
print(f"ResNet101: Training Accuracy: {resnet101_training_accuracy * 100:.2f}%, Validation Accuracy:
{resnet101_val_accuracy * 100:.2f}%")
# ResNet152 fggfgfdertrefd
resnet152 = ResNet152(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
resnet152_model = create_model(resnet152)
history_resnet152 = resnet152_model.fit(train_generator, epochs=4, validation_data=validation_generator, verbose=1)
resnet152_training_accuracy = history_resnet152.history['accuracy'][-1]
resnet152_val_accuracy = max(history_resnet152.history['val_accuracy'])
print(f"ResNet152: Training Accuracy: {resnet152_training_accuracy * 100:.2f}%, Validation Accuracy: {resnet152_val_accuracy * 100:.2f}%")
accuracy_dict = {
    'VGG16': vgg16_training_accuracy,
    'GoogleNet': googlenet_training_accuracy,
    'ResNet50': resnet50_training_accuracy,
    'ResNet101': resnet101_training_accuracy,
    'ResNet152': resnet152_training_accuracy
}
names = list(accuracy_dict.keys())
accuracies = list(accuracy_dict.values())
plt.figure(figsize=(10, 5))
plt.barh(names, accuracies, color='blue')
plt.xlabel('Accuracy')
plt.title('Accuracy of Different Pre-trained Models')
plt.show()
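In create_model above, the ImageNet weights of each base network remain trainable, so every epoch fine-tunes the whole backbone. A common variant (sketched below as an assumption; it was not used in the recorded runs) freezes the convolutional base and trains only the new classification head:

# Hedged variant of create_model with a frozen backbone (pure feature extraction)
def create_frozen_model(base_model, lr=0.001):
    base_model.trainable = False   # keep the pre-trained ImageNet weights fixed
    model = models.Sequential()
    model.add(base_model)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=Adam(learning_rate=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model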
Practical- 6
GAN (Generative Adversarial Networks)
import tensorflow as tf
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (valid_images, valid_labels) = fashion_mnist.load_data()
import matplotlib.pyplot as plt
data_idx = 42
plt.figure()
plt.imshow(train_images[data_idx], cmap='gray')
plt.colorbar()
plt.grid(False)
plt.show()
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
# Load and preprocess MNIST data
fashion_mnist_data = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (valid_images, valid_labels) = fashion_mnist_data.load_data()
train_images = (train_images.astype(np.float32)- 127.5) / 127.5 # Normalize to [-1, 1]
train_images = np.expand_dims(train_images, axis=-1)
BUFFER_SIZE = 60000
BATCH_SIZE = 156
# Create datasets
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# Generator model
def build_generator():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model
# Discriminator model
def build_discriminator():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
# Loss function
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)
# Optimizers
generator = build_generator()
discriminator = build_discriminator()
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
# Training parameters
EPOCHS=70
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
gen_losses = []
disc_losses = []
gen_accuracies = []
disc_accuracies = []
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
    # Calculate accuracy
    real_accuracy = tf.reduce_mean(tf.cast(real_output > 0, tf.float32))
    fake_accuracy = tf.reduce_mean(tf.cast(fake_output < 0, tf.float32))
    disc_accuracy = 0.5 * (real_accuracy + fake_accuracy)
    gen_accuracy = 1 - fake_accuracy
    return gen_loss, disc_loss, gen_accuracy, disc_accuracy
def train(dataset, epochs):
    for epoch in range(epochs):
        gen_loss_epoch = 0
        disc_loss_epoch = 0
        gen_accuracy_epoch = 0
        disc_accuracy_epoch = 0
        for image_batch in dataset:
            gen_loss, disc_loss, gen_accuracy, disc_accuracy = train_step(image_batch)
            gen_loss_epoch += gen_loss
            disc_loss_epoch += disc_loss
            gen_accuracy_epoch += gen_accuracy
            disc_accuracy_epoch += disc_accuracy
        # Average metrics for the epoch
        gen_loss_epoch /= len(dataset)
        disc_loss_epoch /= len(dataset)
        gen_accuracy_epoch /= len(dataset)
        disc_accuracy_epoch /= len(dataset)
        # Append losses and accuracies
        gen_losses.append(gen_loss_epoch)
        disc_losses.append(disc_loss_epoch)
        gen_accuracies.append(gen_accuracy_epoch)
        disc_accuracies.append(disc_accuracy_epoch)
        # Produce images for the GIF
        generate_and_save_images(generator, epoch + 1, seed)
        print(f'Epoch {epoch + 1}, Generator Loss: {gen_loss_epoch:.4f}, Discriminator Loss: {disc_loss_epoch:.4f}, '
              f'Generator Accuracy: {gen_accuracy_epoch:.4f}, Discriminator Accuracy: {disc_accuracy_epoch:.4f}')
def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig(f'image_at_epoch_{epoch}.png')
    plt.show()
train(train_dataset, EPOCHS)
plt.figure(figsize=(12, 5))
# Loss plot
plt.subplot(1, 2, 1)
plt.plot(gen_losses, label="Generator Loss")
plt.plot(disc_losses, label="Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training Loss of Generator and Discriminator")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(gen_accuracies, label="Generator Accuracy")
plt.plot(disc_accuracies, label="Discriminator Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Training Accuracy of Generator and Discriminator")
plt.legend()
plt.tight_layout()
plt.show()
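After training, new images come from feeding fresh noise through the generator alone; a minimal sampling sketch using the generator and noise_dim defined above (illustrative, not part of the recorded run):

# Sample a few new images from random latent vectors (generator outputs lie in [-1, 1])
new_noise = tf.random.normal([4, noise_dim])
samples = generator(new_noise, training=False)
plt.figure(figsize=(8, 2))
for i in range(4):
    plt.subplot(1, 4, i + 1)
    plt.imshow(samples[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
    plt.axis('off')
plt.show()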
Practical- 7
Autoencoder
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt
# Loading the dataset
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# Parameters
input_dim = x_train.shape[1] # 28x28 pixels flattened
# Autoencoder structure
encoding_dim = 64 # Size of the latent space (hidden)
input_img = Input(shape=(input_dim,))
encoded = Dense(encoding_dim, activation="relu")(input_img)
decoded = Dense(input_dim, activation="sigmoid")(encoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
# Training
history=autoencoder.fit(
x_train, x_train,
epochs=15,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# Pass the test images through the autoencoder (both calls below use the full 784 -> 64 -> 784 model;
# see the encoder/decoder sketch after this listing for extracting the 64-dim codes directly)
encoded_imgs = autoencoder.predict(x_test)
decoded_imgs = autoencoder.predict(encoded_imgs)
# Display the original and reconstructed images
n = 10  # How many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original images
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap="gray")
    plt.title("Original")
    plt.axis("off")
    # Reconstructed images
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap="gray")
    plt.title("Reconstructed")
    plt.axis("off")
plt.show()
# Plotting the training and validation loss
plt.figure(figsize=(10, 6))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title("Training and Validation Loss over Epochs")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
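Note that both predict calls in the listing go through the full autoencoder, so encoded_imgs is itself a 784-dimensional reconstruction rather than a 64-dimensional code. A minimal sketch (reusing input_img, encoded, encoding_dim and the trained autoencoder from above) of how separate encoder and decoder models would expose the actual latent codes:

# Split the trained autoencoder into an explicit encoder (784 -> 64) and decoder (64 -> 784)
encoder = Model(input_img, encoded)
latent_input = Input(shape=(encoding_dim,))
decoder = Model(latent_input, autoencoder.layers[-1](latent_input))
codes = encoder.predict(x_test)            # shape (10000, 64): the latent codes
reconstructions = decoder.predict(codes)   # shape (10000, 784): reconstructions from the codes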
OUTPUT
Epoch 1/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 3s 7ms/step - loss: 0.3467 - val_loss: 0.1638
Epoch 2/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.1533 - val_loss: 0.1275
Epoch 3/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.1232 - val_loss: 0.1087
Epoch 4/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1068 - val_loss: 0.0975
Epoch 5/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0965 - val_loss: 0.0903
Epoch 6/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0898 - val_loss: 0.0852
Epoch 7/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0851 - val_loss: 0.0816
Epoch 8/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0816 - val_loss: 0.0791
Epoch 9/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0795 - val_loss: 0.0774
Epoch 10/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0779 - val_loss: 0.0763
Epoch 11/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0767 - val_loss: 0.0755
Epoch 12/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0760 - val_loss: 0.0748
Epoch 13/15
...
Epoch 15/15
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0746 - val_loss: 0.0738
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 1ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step
Practical- 8
Variational Autoencoder (VAE)
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt
# Load and preprocess MNIST data
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = np.reshape(x_test, (-1, 28, 28, 1))
# Define VAE model parameters
latent_dim = 2 # Dimension of the latent space
# Encoder network
inputs = layers.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
# Custom Sampling Layer
class Sampling(layers.Layer):
    def call(self, inputs):
        z_mean, z_log_var = inputs
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon

z = Sampling()([z_mean, z_log_var])
# Decoder network
decoder_inputs = layers.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation="relu")(decoder_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
# Models
encoder = Model(inputs, [z_mean, z_log_var, z], name="encoder")
decoder = Model(decoder_inputs, outputs, name="decoder")
vae_outputs = decoder(z)
vae = Model(inputs, vae_outputs, name="vae")
# Define the custom training loop
optimizer = tf.keras.optimizers.Adam()
# Loss functions
def compute_loss(x, x_decoded, z_mean, z_log_var):
    # Reconstruction loss
    reconstruction_loss = tf.reduce_mean(
        tf.keras.losses.binary_crossentropy(x, x_decoded)
    ) * 28 * 28
    # KL divergence
    kl_loss = -0.5 * tf.reduce_sum(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=-1)
    return tf.reduce_mean(reconstruction_loss + kl_loss)
# Training step
@tf.function
def train_step(x):
    with tf.GradientTape() as tape:
        z_mean, z_log_var, z = encoder(x)
        x_decoded = decoder(z)
        loss = compute_loss(x, x_decoded, z_mean, z_log_var)
    gradients = tape.gradient(loss, vae.trainable_variables)
    optimizer.apply_gradients(zip(gradients, vae.trainable_variables))
    return loss
# Train the VAE model
epochs = 20
batch_size = 128
for epoch in range(epochs):
    print(f"Epoch {epoch + 1}/{epochs}")
    for i in range(0, len(x_train), batch_size):
        batch = x_train[i:i + batch_size]
        loss = train_step(batch)
    print(f"Loss: {loss.numpy()}")
# Test the VAE by generating images from random points in the latent space
def plot_latent_space(decoder, n=15, digit_size=28):
    scale = 2.0
    figure = np.zeros((digit_size * n, digit_size * n))
    grid_x = np.linspace(-scale, scale, n)
    grid_y = np.linspace(-scale, scale, n)
    for i, yi in enumerate(grid_y):
        for j, xi in enumerate(grid_x):
            z_sample = np.array([[xi, yi]])
            x_decoded = decoder.predict(z_sample)
            digit = x_decoded[0].reshape(digit_size, digit_size)
            figure[i * digit_size: (i + 1) * digit_size,
                   j * digit_size: (j + 1) * digit_size] = digit
    plt.figure(figsize=(10, 10))
    plt.imshow(figure, cmap="Greys_r")
    plt.axis("off")
    plt.show()
plot_latent_space(decoder)
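Besides decoding a grid of latent points, the 2-D latent space can be inspected directly by encoding the test images; a minimal companion sketch using the encoder defined above (not part of the recorded run):

# Project the test set into the 2-D latent space learned by the encoder
z_mean_test, _, _ = encoder.predict(x_test, batch_size=128)
plt.figure(figsize=(8, 8))
plt.scatter(z_mean_test[:, 0], z_mean_test[:, 1], s=2)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.title("MNIST test images in the 2-D latent space")
plt.show()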
OUTPUT
Epoch 1/20
Loss: 181.914306640625
Epoch 2/20
Loss: 172.988525390625
Epoch 3/20
Loss: 167.75706481933594
Epoch 4/20
Loss: 164.3065185546875
Epoch 5/20
Loss: 162.8129119873047
Epoch 6/20
Loss: 161.01800537109375
Epoch 7/20
Loss: 159.90660095214844
Epoch 8/20
Loss: 159.00697326660156
Epoch 9/20
Loss: 158.27330017089844
Epoch 10/20
Loss: 157.18226623535156
Epoch 11/20
Loss: 155.53697204589844
Epoch 12/20
Loss: 154.06996154785156
Epoch 13/20
Loss: 154.075927734375
Epoch 14/20
Loss: 154.18553161621094
Epoch 15/20
Loss: 153.10816955566406
Epoch 16/20
Loss: 152.5873260498047
Epoch 17/20
Loss: 151.85647583007812
Epoch 18/20
Loss: 151.90882873535156
Epoch 19/20
Loss: 150.2215118408203
Epoch 20/20
Loss: 150.9467010498047
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 401ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 16ms/step
...
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 15ms/step
Practical 9
Long Short-Term Memory (LSTM) and Bidirectional LSTM
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from sklearn.metrics import mean_squared_error
# Load dataset
stock_symbol = 'AAPL'
data = yf.download(stock_symbol, start="2015-01-01", end="2023-01-01")
data = data[['Close']]
# Preprocess data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
# Split data into training and test sets
train_size = int(len(scaled_data) * 0.8)
train_data = scaled_data[:train_size]
test_data = scaled_data[train_size:]
# Prepare training data
sequence_length = 100
X_train = []
y_train = []
for i in range(sequence_length, len(train_data)):
    X_train.append(train_data[i-sequence_length:i, 0])
    y_train.append(train_data[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshape data for LSTM
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# LSTM model
model = Sequential()
model.add(LSTM(units=70, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(units=70))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
history = model.fit(X_train, y_train, epochs=70, batch_size=32, verbose=1)
# Training loss
plt.figure(figsize=(10, 4))
plt.plot(history.history['loss'], label='Training Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss Over Epochs')
plt.legend()
plt.show()
# Prepare test data
X_test = []
y_test = []
for i in range(sequence_length, len(test_data)):
    X_test.append(test_data[i-sequence_length:i, 0])
    y_test.append(test_data[i, 0])
X_test = np.array(X_test)
y_test = data[train_size + sequence_length:]['Close'].values # Actual test data
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Predict on test data
predicted_prices = model.predict(X_test)
predicted_prices = scaler.inverse_transform(predicted_prices)
# Actual vs. predicted prices
plt.figure(figsize=(10, 6))
plt.plot(data.index[train_size + sequence_length:], y_test, label='Actual Stock Price')
plt.plot(data.index[train_size + sequence_length:], predicted_prices, label='Predicted Stock Price')
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.title('Actual vs. Predicted Stock Price')
plt.legend()
plt.show()
# Test loss
test_loss = mean_squared_error(y_test, predicted_prices)
print("Test MSE Loss:", test_loss)
Practical 10
Bidirectional LSTM
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Bidirectional, LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
# Load dataset
stock_symbol = 'AAPL'
data = yf.download(stock_symbol, start="2015-01-01", end="2023-01-01")
data = data[['Close']]
# Preprocess data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
# Split data into training and test sets
train_size = int(len(scaled_data) * 0.8)
train_data = scaled_data[:train_size]
test_data = scaled_data[train_size:]
# Prepare training data
sequence_length = 160
X_train = []
y_train = []
for i in range(sequence_length, len(train_data)):
    X_train.append(train_data[i-sequence_length:i, 0])
    y_train.append(train_data[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshape data for LSTM
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Build Bidirectional LSTM model
model = Sequential()
model.add(Bidirectional(LSTM(units=70, return_sequences=True), input_shape=(X_train.shape[1], 1)))
# model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(units=70, return_sequences=True)))
# model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(units=70)))
# model.add(Dropout(0.2))
model.add(Dense(1))
optimizer = Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mean_squared_error')
# Train the model
history = model.fit(X_train, y_train, epochs=70, batch_size=32, verbose=1)
# Plot training loss
plt.figure(figsize=(10, 4))
plt.plot(history.history['loss'], label='Training Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss Over Epochs')
plt.legend()
plt.show()
# Prepare test data
X_test = []
y_test = []
for i in range(sequence_length, len(test_data)):
    X_test.append(test_data[i-sequence_length:i, 0])
    y_test.append(test_data[i, 0])
X_test = np.array(X_test)
y_test = data[train_size + sequence_length:]['Close'].values # Actual test data
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Predict on test data
predicted_prices = model.predict(X_test)
predicted_prices = scaler.inverse_transform(predicted_prices)
# Plot actual vs. predicted prices
plt.figure(figsize=(10, 6))
plt.plot(data.index[train_size + sequence_length:], y_test, label='Actual Stock Price')
plt.plot(data.index[train_size + sequence_length:], predicted_prices, label='Predicted Stock Price')
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.title('Actual vs. Predicted Stock Price')
plt.legend()
plt.show()
# Calculate and print test MSE
test_loss = mean_squared_error(y_test, predicted_prices)
print("Test MSE Loss:", test_loss)