LAB-1 MP NEURON AND PERCEPTRON
"""
Boolean AND
"""
w=1

# truth table: the last column holds the expected AND output
a=[[0,0,0],
   [0,1,0],
   [1,0,0],
   [1,1,1]]

def threshold(theta,total): #fire only when the weighted sum reaches the threshold
    if(total>=theta):
        return 1
    else:
        return 0

for i in range(len(a)):
    total=0
    for j in range(len(a[i])-1):
        total+=a[i][j]*w
    # for AND the threshold equals the number of inputs (here 2)
    print("AND({},{})={}".format(a[i][0],a[i][1],threshold(len(a[i])-1,total)))

"""Boolean OR"""

theta=0
w=1
sum=0

a=[[0,0,0],
[0,1,1],
[1,0,1],
[1,1,1]]

def threshold(theta,sum): #to check if the threshold is reached or not
if(theta>=sum):
return 1
else:
return 0

for i in range(len(a)):
sum=0
for j in range(len(a[i])-1):
sum+=a[i][j]*w
print("OR({},{})={}".format(a[i][0],a[i][1],threshold(1,sum)))

"""PERCEPTRON"""

theta=0
w=1
sum=0

"""Boolean AND"""

input_=[[0,0],
[0,1],
[1,0],
[1,1]]

output=[0,0,0,1]

l=0.01 #learning rate
import random
w=[]
for i in range(len(input_)):
a=round(random.uniform(0,1),2)
w.append(a)

w

b=round(random.uniform(0,1),2)
b

def step_func(x):
if(x>1):
return 1
else:
return 0

def error(out):
return output[i]-out

for i in range(len(input_)):
sum=0
out=0
for j in range(len(input_[i])):
sum+=input_[i][j]*w[i]+b
out=step_func(sum)
a=error(out)
while(a!=0):
for j in range(len(input_[i])):
sum+=input_[i][j]*w[i]+b
out=step_func(sum)
a=error(out)
for j in range(len(input_[i])):
w[i]+=w[i]+a*output[i]
b=b+a
print("AND({},{})={}".format(input_[i][0],input_[i][1],out))

"""Boolean OR"""

input_=[[0,0],
[0,1],
[1,0],
[1,1]]

output=[0,1,1,1]

l=0.01 #learning rate
import random
w=[]
for i in range(len(input_)):
a=round(random.uniform(0,1),2)
w.append(a)

w

b=round(random.uniform(0,1),2)
b

def step_func(x):
if(x>1):
return 1
else:
return 0

def error(out):
return output[i]-out

for i in range(len(input_)):
sum=0
out=0
for j in range(len(input_[i])):
sum+=input_[i][j]*w[i]+b
out=step_func(sum)
a=error(out)
while(a!=0):
for j in range(len(input_[i])):
sum+=input_[i][j]*w[i]+b
out=step_func(sum)
a=error(out)
for j in range(len(input_[i])):
w[i]+=w[i]+a*output[i]
b=b+a
print("OR({},{})={}".format(input_[i][0],input_[i][1],out))

////////////////////////////////////////////////////////////////////////////////
LAB-2 SIGMOID NEURON
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.layers import Dense
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

from google.colab import files
uploaded=files.upload()

data=pd.read_csv("diabetes.csv")

list(data)

data.head()

data['Outcome']

# To check whether the target variable is well balanced or not
sns.countplot(x = 'Outcome', data = data)

"""Training """

# Drop a weak feature; the target column must also be removed from the inputs
X = data.drop(['SkinThickness', 'Outcome'], axis = 1).values
y = data['Outcome'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)

"""Compile the model"""

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

model = Sequential()
# Output neuron
# Since it is a binary classification last layer output needs to get converted into 1/0
model.add(Dense(1, activation = 'sigmoid'))

model.compile(loss = 'binary_crossentropy', optimizer = 'adam',metrics=['accuracy'])

model.fit(x = X_train, y = y_train, epochs = 125, validation_data = (X_test, y_test))

predictions=model.predict(X_test)
predictions

# Threshold the sigmoid outputs at 0.5 to get hard 0/1 class labels
for i in range(len(predictions)):
    if predictions[i]>0.5:
        predictions[i]=1.0
    else:
        predictions[i]=0.0

predictions

print(accuracy_score(y_test,predictions)*100,'%')
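
The element-wise loop above also has a one-line vectorized equivalent (an alternative sketch doing the same 0.5 thresholding):

predictions = (model.predict(X_test) > 0.5).astype(int).ravel()
print(accuracy_score(y_test, predictions)*100, '%')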
////////////////////////////////////////////////////////////////////////////////
LAB-3 FFN for classification


import numpy as np
import pandas as pd

from sklearn.datasets import load_iris

iris=load_iris()

data = pd.DataFrame(iris.data)
data['target'] = iris.target

data

X = data.drop('target', axis = 1)
y = data.target

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_scaler = scaler.fit_transform(X)
X_scaler[1]

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_scaler, y, test_size=0.2, random_state=2)

import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape = X_train.shape[1:]),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dense(64, activation = 'relu'),
    tf.keras.layers.BatchNormalization(),
    # iris has 3 classes, so the softmax layer needs 3 units
    tf.keras.layers.Dense(3, activation = 'softmax')
])

model.compile(
    loss = 'sparse_categorical_crossentropy',
    optimizer = 'adam',
    metrics = ['accuracy']
)

model.fit(X_train, y_train, epochs = 200)

model.evaluate(X_test, y_test)

predictions = model.predict(X_test)
predictions.shape
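
Each row of predictions is a probability distribution over the 3 classes; hard labels come from an argmax (a usage sketch, not part of the lab):

predicted_classes = np.argmax(predictions, axis=1)
print(predicted_classes[:10])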
////////////////////////////////////////////////////////////////////////////////
LAB-4 LINEAR REGRESSION WITH SGD
import numpy as np
import pandas as pd

from google.colab import files
uploaded=files.upload()

data = pd.read_csv('Wine_Quality_Dataset.csv')

data

X = np.array(data['alcohol'])
y = np.array(data['quality'])
l=len(X)

from sklearn.model_selection import train_test_split

"""**Train the model**"""

#training data
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=1)
lx=len(X_train)

m=0.1
c=0.5
alpha=0.01
n=1000
for i in range(n):
    slope=0
    intercept=0
    for j in range(lx):
        # pick one random training sample per step (the stochastic part of SGD)
        k=np.random.randint(lx)
        pred=m*X_train[k]+c
        intercept=intercept+(pred-y_train[k])
        slope=slope+(pred-y_train[k])*X_train[k]
    # gradient-descent update of intercept and slope
    c=c-alpha*(intercept/lx)
    m=m-alpha*(slope/lx)

print(f"slope is {m}")
print(f"intercept is {c}")

y_pred=m*X_test+c
y_pred

from sklearn.metrics import mean_squared_error,r2_score,mean_absolute_error
from sklearn.metrics import explained_variance_score

"""**Metric scores of SGD**"""

print("Mse error:",mean_squared_error(y_test,y_pred))
print("rmse error:",np.sqrt(mean_squared_error(y_test,y_pred)))
print("abs error:",mean_absolute_error(y_test,y_pred))
print("variance:",explained_variance_score(y_test,y_pred))
print("scores:",r2_score(y_test,y_pred))
////////////////////////////////////////////////////////////////////////////////
LAB-5 Mini-Batch Gradient Descent

#linear regression with mini-batch gradient descent
#(continues with X_train/X_test/y_train/y_test from LAB-4)
class mini_batch_gradient_descent:

    def create_batch(self,x_train,y_train,batch_size):
        mini_batches=[]
        data=np.stack((x_train,y_train),axis=1)
        np.random.shuffle(data)
        no_of_batches=x_train.shape[0]//batch_size
        for i in range(no_of_batches):
            mini_batch=data[i*batch_size:(i+1)*batch_size]
            mini_batches.append((mini_batch[:,0],mini_batch[:,1]))
        # leftover samples form one final, smaller batch
        if x_train.shape[0]%batch_size!=0:
            mini_batch=data[(i+1)*batch_size:]
            mini_batches.append((mini_batch[:,0],mini_batch[:,1]))
        return mini_batches

    def fit(self,x_train,y_train,alpha,epochs,batch_size):
        self.m=np.random.randn(1,1)
        self.c=np.random.randn(1,1)
        l=len(x_train)
        for i in range(epochs):
            batches=self.create_batch(x_train,y_train,batch_size)
            for batch in batches:
                xb=batch[0]
                yb=batch[1]
                xb=xb.reshape(1,xb.shape[0])
                # gradient of the squared error w.r.t. intercept and slope
                intercept=np.sum((np.dot(self.m,xb)+self.c)-yb)
                slope=np.sum(((np.dot(self.m,xb)+self.c)-yb)*xb)
                self.m=self.m-alpha*(slope/l)
                self.c=self.c-alpha*(intercept/l)

    def slope_intercept(self):
        print(f"slope is {self.m[0][0]}")
        print(f"intercept is {self.c[0][0]}")

    def predict(self,x_test):
        x_test=x_test.reshape(x_test.shape[0],1)
        # use a local reshaped copy so predict() does not mutate self.m
        m=self.m.reshape(self.m.shape[1],self.m.shape[0])
        result=np.dot(x_test,m)+self.c
        return result

MBGD=mini_batch_gradient_descent()
MBGD.fit(X_train,y_train,0.01,4000,4)

MBGD.slope_intercept()

#predictions
y_pred=MBGD.predict(X_test)
from sklearn.metrics import mean_squared_error,r2_score,mean_absolute_error
from sklearn.metrics import explained_variance_score

"""**METRICS SCORES OF MINI-BATCH GRADIENT DESCENT**"""

print("Mse error:",mean_squared_error(y_test,y_pred))
print("rmse error:",np.sqrt(mean_squared_error(y_test,y_pred)))
print("abs error:",mean_absolute_error(y_test,y_pred))
print("variance:",explained_variance_score(y_test,y_pred))
print("scores:",r2_score(y_test,y_pred))
////////////////////////////////////////////////////////////////////////////////
LAB-6 Small concepts together (L2 regularization, Dropout, Augmentation, Early stopping)

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

mnist = tf.keras.datasets.mnist #Get the data

(x_train, y_train),(x_test, y_test) = mnist.load_data()

x_train = tf.keras.utils.normalize(x_train,axis=1)
x_test = tf.keras.utils.normalize(x_test,axis=1)

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

model.fit(x_train,y_train,epochs=50)

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification.model.h5')
predictions = new_model.predict([x_test])

print("The number is : ",np.argmax(predictions[0]))

#Show the image
plt.imshow(x_test[0])
plt.show()

"""**Regularization**"""

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu,activity_regularizer=tf.keras.regularizers.L2(0.01)))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu,activity_regularizer=tf.keras.regularizers.L2(0.01)))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

model.fit(x_train,y_train,epochs=50)

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification_regularizer.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification_regularizer.model.h5')
predictions = new_model.predict([x_test])

print("The number is : ",np.argmax(predictions[0]))

#Show the image
plt.imshow(x_test[0])
plt.show()
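
The layers above penalize their activations (activity_regularizer); classic L2 weight decay instead penalizes the weights themselves via kernel_regularizer (an alternative sketch, not what this lab uses):

model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu,
                                kernel_regularizer=tf.keras.regularizers.L2(0.01)))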

"""**Dropout**"""

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

model.fit(x_train,y_train,epochs=100)

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification_dropout.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification_dropout.model.h5')
predictions = new_model.predict([x_test])

print("The number is : ",np.argmax(predictions[0]))

#Show the image
plt.imshow(x_test[0])
plt.show()

"""**Early Stopping**"""

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=2)

model.fit(x_train,y_train,epochs=50,callbacks=[callback])

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification_stopping.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification_stopping.model.h5')
predictions = new_model.predict([x_test])

print("The number is : ",np.argmax(predictions[0]))

#Show the image
plt.imshow(x_test[0])
plt.show()
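
The callback above monitors the training loss; a common variant (an assumption, not part of the lab) watches a validation split instead and restores the best weights seen:

callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
model.fit(x_train, y_train, epochs=50, validation_split=0.1, callbacks=[callback])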

"""**Using Augmentation techniques**"""

mnist = tf.keras.datasets.mnist #Get the data

(x_train, y_train),(x_test, y_test) = mnist.load_data()

x_train = tf.keras.utils.normalize(x_train,axis=1)
x_test = tf.keras.utils.normalize(x_test,axis=1)

image_size = x_train.shape[1]
input_size = image_size * image_size
batch_size = 128
hidden_units = 256
epochs = 20
max_batches = len(x_train) / batch_size

from keras.preprocessing.image import ImageDataGenerator

rotation_range_val = 30
datagen = ImageDataGenerator(rotation_range=rotation_range_val)
# fit the generator
datagen.fit(x_train.reshape(x_train.shape[0], 28, 28, 1))
# define number of rows & columns
num_row = 2
num_col = 8
num= num_row*num_col
# plot a few training digits before augmentation
print('BEFORE:\n')
fig1, axes1 = plt.subplots(num_row, num_col, figsize=(1.5*num_col,2*num_row))
for i in range(num):
    ax = axes1[i//num_col, i%num_col]
    ax.imshow(x_train[i], cmap='gray_r')
    ax.set_title('Label: {}'.format(y_train[i]))
plt.tight_layout()
plt.show()
# plot the same digits after random rotation by the generator
print('AFTER:\n')
fig2, axes2 = plt.subplots(num_row, num_col, figsize=(1.5*num_col,2*num_row))
for X, Y in datagen.flow(x_train.reshape(x_train.shape[0], 28, 28, 1),y_train.reshape(y_train.shape[0], 1),batch_size=num,shuffle=False):
    for i in range(0, num):
        ax = axes2[i//num_col, i%num_col]
        ax.imshow(X[i].reshape(28,28), cmap='gray_r')
        ax.set_title('Label: {}'.format(int(Y[i])))
    break
plt.tight_layout()
plt.show()

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

# train on the augmented stream from the generator, not on the single plotting batch X, Y
model.fit(datagen.flow(x_train.reshape(-1, 28, 28, 1), y_train, batch_size=batch_size), epochs=epochs)

# reshape the test images to match the (28, 28, 1) input the network was trained on
x_test = np.reshape(x_test, [-1, 28, 28, 1])
scores = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=False)
print('Test loss:', scores[0])
print('Test accuracy: %0.1f%%' % (100 * scores[1]) )

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification_augmentation.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification_augmentation.model.h5')
predictions = new_model.predict([x_test])

print("The number is : ",np.argmax(predictions[0]))

#Show the image
plt.imshow(x_test[0].reshape(28,28))
plt.show()

"""**Optimizing neural networks using L2 regularization, Dropout and early stopping.**"""

mnist = tf.keras.datasets.mnist #Get the data

(x_train, y_train),(x_test, y_test) = mnist.load_data()

x_train = tf.keras.utils.normalize(x_train,axis=1)
x_test = tf.keras.utils.normalize(x_test,axis=1)

#Create Neural Network
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu,activity_regularizer=tf.keras.regularizers.L2(0.01)))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(128,activation = tf.nn.relu,activity_regularizer=tf.keras.regularizers.L2(0.01)))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])

callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=2)

model.fit(x_train,y_train,epochs=50,callbacks=[callback])

#Show the loss and accuracy.
val_loss,val_acc = model.evaluate(x_test,y_test)
print(val_loss,val_acc)

model.save('Hand written digit classification_usingalltech.model.h5')

new_model = tf.keras.models.load_model('Hand written digit classification_usingalltech.model.h5')
predictions = new_model.predict([x_test])

#Convert to understandable form.
print("The number is : ",np.argmax(predictions[0]))
#If you want change the test and prediction numbers

#Show the image
plt.imshow(x_test[0])
plt.show()
////////////////////////////////////////////////////////////////////////////////
LAB-7 Le-Net

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test)=tf.keras.datasets.mnist.load_data()

# pad the 28x28 MNIST digits to the 32x32 input LeNet expects, and scale to [0,1]
x_train = tf.pad(x_train, [[0, 0], [2,2], [2,2]])/255
x_test = tf.pad(x_test, [[0, 0], [2,2], [2,2]])/255
x_train.shape

x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
x_train.shape

x_val = x_train[-2000:,:,:,:]
y_val = y_train[-2000:]
x_train = x_train[:-2000,:,:,:]
y_train = y_train[:-2000]

model = models.Sequential()
# input_shape must exclude the batch dimension
model.add(layers.Conv2D(6, 5, activation='tanh', input_shape=x_train.shape[1:]))
model.add(layers.AveragePooling2D(2))
model.add(layers.Activation('sigmoid'))
model.add(layers.Conv2D(16, 5, activation='tanh'))
model.add(layers.AveragePooling2D(2))
model.add(layers.Activation('sigmoid'))
model.add(layers.Conv2D(120, 5, activation='tanh'))
model.add(layers.Flatten())
model.add(layers.Dense(84, activation='tanh'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=64, epochs=40, validation_data=(x_val, y_val))
fig, axs = plt.subplots(2, 1, figsize=(15,15))
axs[0].plot(history.history['loss'])
axs[0].plot(history.history['val_loss'])
axs[0].title.set_text('Training Loss vs Validation Loss')
axs[0].legend(['Train', 'Val'])
axs[1].plot(history.history['accuracy'])
axs[1].plot(history.history['val_accuracy'])
axs[1].title.set_text('Training Accuracy vs Validation Accuracy')
axs[1].legend(['Train', 'Val'])
model.evaluate(x_test, y_test)
////////////////////////////////////////////////////////////////////////////////
LAB-8 Res-Net

import tensorflow as tf
from tensorflow import keras
import numpy as np
from tensorflow.keras.applications import ResNet50
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten

model_resnet_50 = Sequential()
model_resnet_50.add(ResNet50(include_top=False,weights="imagenet",input_tensor=None,input_shape=(32,32,3),pooling='avg',classes=10))
model_resnet_50.add(Flatten())
model_resnet_50.add(Dense(1024, activation='relu'))
model_resnet_50.add(Dense(512, activation='relu'))
model_resnet_50.add(Dense(10, activation='softmax'))

model_resnet_50.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

model_resnet_50.summary()

model_resnet_50.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1, validation_data=(x_test, y_test))

loss, accuracy = model_resnet_50.evaluate(x_test, y_test, batch_size=64)

predicted = np.argmax(model_resnet_50.predict(x_test),axis=1)
predicted

# y_test is one-hot encoded, so argmax recovers the actual class index
for i in range(len(y_test)):
    print("Actual = %s, Predicted = %s" % (np.argmax(y_test[i]), predicted[i]))

////////////////////////////////////////////////////////////////////////////////
LAB-9 SKIP GRAM MODEL

import numpy as np
import string
from nltk.corpus import stopwords

import nltk
nltk.download('stopwords')

def softmax(x):
    # numerically stable softmax
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()

class word2vec(object):
    def __init__(self):
        self.N = 10
        self.X_train = []
        self.y_train = []
        self.window_size = 2
        self.alpha = 0.001
        self.words = []
        self.word_index = {}

    def initialize(self,V,data):
        self.V = V
        self.W = np.random.uniform(-0.8, 0.8, (self.V, self.N))
        self.W1 = np.random.uniform(-0.8, 0.8, (self.N, self.V))

        self.words = data
        for i in range(len(data)):
            self.word_index[data[i]] = i

    def feed_forward(self,X):
        self.h = np.dot(self.W.T,X).reshape(self.N,1)
        self.u = np.dot(self.W1.T,self.h)
        self.y = softmax(self.u)
        return self.y

    def backpropagate(self,x,t):
        e = self.y - np.asarray(t).reshape(self.V,1)
        dLdW1 = np.dot(self.h,e.T)
        X = np.array(x).reshape(self.V,1)
        dLdW = np.dot(X, np.dot(self.W1,e).T)
        self.W1 = self.W1 - self.alpha*dLdW1
        self.W = self.W - self.alpha*dLdW

    def train(self,epochs):
        for x in range(1,epochs+1):
            self.loss = 0
            for j in range(len(self.X_train)):
                self.feed_forward(self.X_train[j])
                self.backpropagate(self.X_train[j],self.y_train[j])
                C = 0
                for m in range(self.V):
                    if(self.y_train[j][m]):
                        self.loss += -1*self.u[m][0]
                        C += 1
                self.loss += C*np.log(np.sum(np.exp(self.u)))
            print("epoch ",x, " loss = ",self.loss)
            # decay the learning rate as training progresses
            self.alpha *= 1/( (1+self.alpha*x) )

    def predict(self,word,number_of_predictions):
        if word in self.words:
            index = self.word_index[word]
            X = [0 for i in range(self.V)]
            X[index] = 1
            prediction = self.feed_forward(X)
            output = {}
            for i in range(self.V):
                output[prediction[i][0]] = i

            # report the highest-probability context words
            top_context_words = []
            for k in sorted(output,reverse=True):
                top_context_words.append(self.words[output[k]])
                if(len(top_context_words)>=number_of_predictions):
                    break

            return top_context_words
        else:
            print("Word not found in dictionary")

def preprocessing(corpus):
    stop_words = set(stopwords.words('english'))
    training_data = []
    sentences = corpus.split(".")
    for i in range(len(sentences)):
        sentences[i] = sentences[i].strip()
        sentence = sentences[i].split()
        # drop stopwords (compared in lowercase), strip punctuation, lowercase the rest
        x = [word.strip(string.punctuation) for word in sentence
             if word.lower() not in stop_words]
        x = [word.lower() for word in x]
        training_data.append(x)
    return training_data

def prepare_data_for_training(sentences,w2v):
    data = {}
    # count word frequencies to build the vocabulary
    for sentence in sentences:
        for word in sentence:
            if word not in data:
                data[word] = 1
            else:
                data[word] += 1
    V = len(data)
    data = sorted(list(data.keys()))
    vocab = {}
    for i in range(len(data)):
        vocab[data[i]] = i

    # one-hot encode each center word and its surrounding context window
    for sentence in sentences:
        for i in range(len(sentence)):
            center_word = [0 for x in range(V)]
            center_word[vocab[sentence[i]]] = 1
            context = [0 for x in range(V)]

            # the +1 makes the window symmetric around the center word
            for j in range(i-w2v.window_size,i+w2v.window_size+1):
                if i!=j and j>=0 and j<len(sentence):
                    context[vocab[sentence[j]]] += 1
            w2v.X_train.append(center_word)
            w2v.y_train.append(context)
    w2v.initialize(V,data)

    return w2v.X_train,w2v.y_train

corpus = ""
corpus += "The quick brown fox jumps over the lazy red dog"
epochs = 100

training_data = preprocessing(corpus)
w2v = word2vec()

prepare_data_for_training(training_data,w2v)
w2v.train(epochs)

print(w2v.predict("jumps",5))
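
Once trained, the learned embedding for a word is simply its row of the input weight matrix W (a usage sketch based on the class above, not part of the lab):

vec = w2v.W[w2v.word_index['fox']]
print(vec)  # a 10-dimensional vector (N = 10 in __init__)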

////////////////////////////////////////////////////////////////////////////////
LAB-10 VGG16
import tensorflow_datasets as tfds
from tensorflow.keras.utils import to_categorical

## Loading images and labels: first 70% of "train" for training, the remaining 30% for testing
(train_ds, train_labels), (test_ds, test_labels) = tfds.load("tf_flowers",split=["train[:70%]", "train[70%:]"],batch_size=-1,as_supervised=True,)

print(train_labels)

import tensorflow as tf
train_ds = tf.image.resize(train_ds, (150, 150))
test_ds = tf.image.resize(test_ds, (150, 150))

## Transforming labels to correct format
train_labels = to_categorical(train_labels, num_classes=5)
test_labels = to_categorical(test_labels, num_classes=5)

"""***load the VGG16 model.***"""

from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input

## Loading VGG16 model
base_model = VGG16(weights="imagenet", include_top=False, input_shape=train_ds[0].shape)
base_model.trainable = False

## Preprocessing input
train_ds = preprocess_input(train_ds)
test_ds = preprocess_input(test_ds)

base_model.summary()

"""***Add Custom Layers***"""

from tensorflow.keras import layers, models
model=models.Sequential()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(50,activation='relu'))
model.add(layers.Dense(20,activation='relu'))
model.add(layers.Dense(5,activation='softmax'))

model.summary()

from tensorflow.keras.callbacks import EarlyStopping

model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'],)

es = EarlyStopping(monitor='val_accuracy', mode='max', patience=5, restore_best_weights=True)

model.fit(train_ds, train_labels, epochs=50, validation_split=0.2, batch_size=32, callbacks=[es])

val_loss,val_acc = model.evaluate(test_ds,test_labels)
print(val_loss,val_acc)
////////////////////////////////////////////////////////////////////////////////
LAB-12 NER with a BiLSTM


from google.colab import files
uploaded=files.upload()

import pandas as pd
data = pd.read_csv('ner_dataset.csv', encoding= 'unicode_escape')
data.head()

def get_dict_map(data, token_or_tag):
    # build index maps in both directions for tokens or tags
    if token_or_tag == 'token':
        vocab = list(set(data['Word'].to_list()))
    else:
        vocab = list(set(data['Tag'].to_list()))

    idx2tok = {idx:tok for idx, tok in enumerate(vocab)}
    tok2idx = {tok:idx for idx, tok in enumerate(vocab)}
    return tok2idx, idx2tok


token2idx, idx2token = get_dict_map(data, 'token')
tag2idx, idx2tag = get_dict_map(data, 'tag')

data['Word_idx'] = data['Word'].map(token2idx)
data['Tag_idx'] = data['Tag'].map(tag2idx)
data.head()

# Fill missing 'Sentence #' values forward so every row carries its sentence id
data_fillna = data.fillna(method='ffill', axis=0)
# Group by sentence and collect each column into a per-sentence list
data_group = data_fillna.groupby(
    ['Sentence #'], as_index=False
)[['Word', 'POS', 'Tag', 'Word_idx', 'Tag_idx']].agg(lambda x: list(x))
# Visualise data
data_group.head()

from sklearn.model_selection import train_test_split
from keras_preprocessing.sequence import pad_sequences
from keras.utils import to_categorical

def get_pad_train_test_val(data_group, data):

    #get max token and tag length
    n_token = len(list(set(data['Word'].to_list())))
    n_tag = len(list(set(data['Tag'].to_list())))

    #Pad tokens (X var)
    tokens = data_group['Word_idx'].tolist()
    maxlen = max([len(s) for s in tokens])
    pad_tokens = pad_sequences(tokens, maxlen=maxlen, dtype='int32', padding='post', value= n_token - 1)

    #Pad tags (y var) and convert them into one-hot encoding
    tags = data_group['Tag_idx'].tolist()
    pad_tags = pad_sequences(tags, maxlen=maxlen, dtype='int32', padding='post', value= tag2idx["O"])
    n_tags = len(tag2idx)
    pad_tags = [to_categorical(i, num_classes=n_tags) for i in pad_tags]

    #Split into train, test and validation sets
    tokens_, test_tokens, tags_, test_tags = train_test_split(pad_tokens, pad_tags, test_size=0.1, train_size=0.9, random_state=2020)
    train_tokens, val_tokens, train_tags, val_tags = train_test_split(tokens_,tags_,test_size = 0.25,train_size =0.75, random_state=2020)

    print(
        'train_tokens length:', len(train_tokens),
        '\ntrain_tags length:', len(train_tags),
        '\ntest_tokens length:', len(test_tokens),
        '\ntest_tags:', len(test_tags),
        '\nval_tokens:', len(val_tokens),
        '\nval_tags:', len(val_tags),
    )

    return train_tokens, val_tokens, test_tokens, train_tags, val_tags, test_tags

train_tokens, val_tokens, test_tokens, train_tags, val_tags, test_tags = get_pad_train_test_val(data_group, data)

import numpy as np
import tensorflow
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from tensorflow.keras.utils import plot_model

from numpy.random import seed
seed(1)
tensorflow.random.set_seed(2)

input_dim = len(list(set(data['Word'].to_list())))+1
output_dim = 64
input_length = max([len(s) for s in data_group['Word_idx'].tolist()])
n_tags = len(tag2idx)
print('input_dim: ', input_dim, '\noutput_dim: ', output_dim, '\ninput_length: ', input_length, '\nn_tags: ', n_tags)

def get_bilstm_lstm_model():
    model = Sequential()

    # Add Embedding layer
    model.add(Embedding(input_dim=input_dim, output_dim=output_dim, input_length=input_length))

    # Add bidirectional LSTM
    model.add(Bidirectional(LSTM(units=output_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2), merge_mode = 'concat'))

    # Add LSTM
    model.add(LSTM(units=output_dim, return_sequences=True, dropout=0.5, recurrent_dropout=0.5))

    # Add TimeDistributed layer; softmax gives a per-token distribution over tags,
    # matching the categorical cross-entropy loss below
    model.add(TimeDistributed(Dense(n_tags, activation="softmax")))

    #Optimiser
    # adam = k.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999)

    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()

    return model

def train_model(X, y, model):
    loss = list()
    for i in range(5):
        # fit model for one epoch on this sequence
        hist = model.fit(X, y, batch_size=1000, verbose=1, epochs=1, validation_split=0.2)
        loss.append(hist.history['loss'][0])
    return loss

results = pd.DataFrame()
model_bilstm_lstm = get_bilstm_lstm_model()
plot_model(model_bilstm_lstm)
results['with_add_lstm'] = train_model(train_tokens, np.array(train_tags), model_bilstm_lstm)
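
With training done, the tagger can be scored on the held-out split (a usage sketch; it reuses the test arrays returned by get_pad_train_test_val above):

test_loss, test_acc = model_bilstm_lstm.evaluate(test_tokens, np.array(test_tags))
print(test_loss, test_acc)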

import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
text = nlp(
'Jim bought 300 shares of Acme Corp. in 2006. And producing an annotated block of text that highlights the names of entities: [Jim]Person bought 300 shares of [Acme Corp.]Organization in [2006]Time. In this example, a person name consisting of one token, a two-token company name and a temporal expression have been detected and classified.State-of-the-art NER systems for English produce near-human performance. For example, the best system entering MUC-7 scored 93.39% of F-measure while human annotators scored 97.60% and 96.95%.[1][2]'
)
displacy.render(text, style = 'ent', jupyter=True)

