1) SLP
#NAND
import numpy as np

def unit(a):
    if a>0:
        return 1
    else:
        return 0

def percep(x,w,b):
    v=np.dot(x,w)+b
    return unit(v)

def notgate(x):
    w=-1
    b=1
    return percep(x,w,b)

def andgate(x):
    w=np.array([1,1])
    b=-1
    return percep(x,w,b)

def nandgate(x):
    v=andgate(x)
    return notgate(v)

t1=np.array([1,1])
t2=np.array([1,0])
t3=np.array([0,1])
t4=np.array([0,0])
print('NAND({},{}) = {}'.format(1,1,nandgate(t1)))
print('NAND({},{}) = {}'.format(1,0,nandgate(t2)))
print('NAND({},{}) = {}'.format(0,1,nandgate(t3)))
print('NAND({},{}) = {}'.format(0,0,nandgate(t4)))

#NOR
def unit(a):
    if a>0:
        return 1
    else:
        return 0

def percep(x,w,b):
    v=np.dot(x,w)+b
    return unit(v)

def ORgate(x):
    w=np.array([2,2])
    b=-1
    return percep(x,w,b)

def notgate(x):
    w=-1
    b=1
    return percep(x,w,b)

def norgate(x):
    v=ORgate(x)
    x=notgate(v)
    return x

t1=np.array([1,1])
t2=np.array([1,0])
t3=np.array([0,1])
t4=np.array([0,0])
print('NOR({},{}) = {}'.format(1,1,norgate(t1)))
print('NOR({},{}) = {}'.format(1,0,norgate(t2)))
print('NOR({},{}) = {}'.format(0,1,norgate(t3)))
print('NOR({},{}) = {}'.format(0,0,norgate(t4)))

#NOT
def unit(a):
    if a>0:
        return 1
    else:
        return 0

def perceptron(x,weight,bias):
    v=np.dot(x,weight)+bias
    return unit(v)

def NOT_function(x):
    bias=1
    weight=-1
    return perceptron(x,weight,bias)

test1=np.array(1)
test2=np.array(0)
print('NOT({}) = {}'.format(1,NOT_function(test1)))
print('NOT({}) = {}'.format(0,NOT_function(test2)))
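The three programs above print their truth tables case by case; as an optional check (not part of the original listing), a single loop can sweep every input combination through the same gate functions:

# Optional: exercise nandgate, norgate and NOT_function over all inputs.
for a in (0, 1):
    for b in (0, 1):
        t = np.array([a, b])
        print('NAND({},{})={}  NOR({},{})={}'.format(a, b, nandgate(t), a, b, norgate(t)))
    print('NOT({}) = {}'.format(a, NOT_function(np.array(a))))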









2) Single Layer Perceptron for Regression
weights=[0,0]
threshold=0
learningrate=1
bias=1
max_iterations=50
x=[
    [0.65,0.42,-1],
    [0.54,0.36,-1],
    [0.23,0.32,1],
    [0.12,0.23,1],
    [0.98,0.89,-1],
    [0.23,0.43,1],
]
y=0
for k in range(0,max_iterations):
    hits=0
    for i in range(0,len(x)):
        total=0
        for j in range(0,len(x[i])-1):
            total+=weights[j]*x[i][j]
        output=total+bias
        if output>threshold:
            y=1
        else:
            y=-1
        if y == x[i][2]:
            hits+=1
        else:
            # Misclassified: nudge each weight and the bias toward the target label
            for j in range(0,len(weights)):
                weights[j]+=learningrate*x[i][j]*x[i][2]
            bias+=learningrate*x[i][2]
            print("\nweights updated to "+str(weights))
    if hits==len(x):
        print("\nFunctionality learned with "+str(k)+" iterations!")
        break
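Once the loop reports convergence, weights and bias encode the learned decision rule. A minimal sketch (not in the original) that applies it to a new, hypothetical sample:

# Classify an unseen 2-feature point with the learned weights and bias.
def classify(point):
    s = weights[0]*point[0] + weights[1]*point[1] + bias
    return 1 if s > threshold else -1

print(classify([0.3, 0.4]))  # hypothetical sample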








3) Single Layer Perceptron for Classification
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data  # TF 1.x tutorial loader

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder("float", [None, 784])  # MNIST data image of shape 28*28 = 784
y = tf.placeholder("float", [None, 10])   # 0-9 digits recognition => 10 classes

# Create model
# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct the model: softmax activation
activation = tf.nn.softmax(tf.matmul(x, W) + b)

# Minimize error using cross entropy
cross_entropy = y * tf.log(activation)
cost = tf.reduce_mean(-tf.reduce_sum(cross_entropy, reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Plot settings
avg_set = []
epoch_set = []

# Initializing the variables
init = tf.global_variables_initializer()

# Launching the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle over the dataset
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)

        # Loop over all the batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using the batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys}) / total_batch

        # Display the logs at each epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
            avg_set.append(avg_cost)
            epoch_set.append(epoch + 1)
    print("Training phase finished")

    plt.plot(epoch_set, avg_set, 'o', label='Logistic Regression Training')
    plt.ylabel('cost')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()

    # Test the model
    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))

    # Calculate the accuracy on the test set
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Model accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))










4) Linear regression using MLP

import pandas as pd
m=0
c=0
X=[5.1,6.2,5.8,5.5,5.0,5.3,6.0]
Y=[54,75,67,65,54,59,69]
print(list(zip(X,Y)))
data=pd.DataFrame(list(zip(X,Y)),columns=['X','Y'])
data
X=data.iloc[:,0]
Y=data.iloc[:,1]
L=0.0001
epochs=1000
n=float(len(X))
#performing gradient descent
for i in range(epochs):
    Y_pred=m*X+c
    D_m=(-2/n)*sum(X*(Y-Y_pred))
    D_c=(-2/n)*sum(Y-Y_pred)
    m=m-L*D_m  # update m
    c=c-L*D_c  # update c
print(m,c)
def predict(x):
    return m*x+c
predict(6.0)
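The two D_ terms in the loop are just the gradients of the mean squared error with respect to the slope and intercept:

E(m,c) = \frac{1}{n}\sum_i \bigl(Y_i - (mX_i + c)\bigr)^2,\qquad
\frac{\partial E}{\partial m} = -\frac{2}{n}\sum_i X_i\,(Y_i - \hat{Y}_i),\qquad
\frac{\partial E}{\partial c} = -\frac{2}{n}\sum_i (Y_i - \hat{Y}_i)

Each iteration steps (m, c) against these gradients with step size L, which is exactly the m=m-L*D_m and c=c-L*D_c updates above.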






7) ID3 Algorithm
import pandas as pd
import math
import numpy as np

data = pd.read_csv("3-dataset.csv")
features = [feat for feat in data]
features.remove("answer")

class Node:
    def __init__(self):
        self.children = []
        self.value = ""
        self.isLeaf = False
        self.pred = ""

def entropy(examples):
    pos = 0.0
    neg = 0.0
    for _, row in examples.iterrows():
        if row["answer"] == "yes":
            pos += 1
        else:
            neg += 1
    if pos == 0.0 or neg == 0.0:
        return 0.0
    else:
        p = pos / (pos + neg)
        n = neg / (pos + neg)
        return -(p * math.log(p, 2) + n * math.log(n, 2))

def info_gain(examples, attr):
    uniq = np.unique(examples[attr])
    #print ("\n",uniq)
    gain = entropy(examples)
    #print ("\n",gain)
    for u in uniq:
        subdata = examples[examples[attr] == u]
        #print ("\n",subdata)
        sub_e = entropy(subdata)
        gain -= (float(len(subdata)) / float(len(examples))) * sub_e
        #print ("\n",gain)
    return gain

def ID3(examples, attrs):
    root = Node()

    max_gain = 0
    max_feat = ""
    for feature in attrs:
        #print ("\n",examples)
        gain = info_gain(examples, feature)
        if gain > max_gain:
            max_gain = gain
            max_feat = feature
    root.value = max_feat
    #print ("\nMax feature attr",max_feat)
    uniq = np.unique(examples[max_feat])
    #print ("\n",uniq)
    for u in uniq:
        #print ("\n",u)
        subdata = examples[examples[max_feat] == u]
        #print ("\n",subdata)
        if entropy(subdata) == 0.0:
            newNode = Node()
            newNode.isLeaf = True
            newNode.value = u
            newNode.pred = np.unique(subdata["answer"])
            root.children.append(newNode)
        else:
            dummyNode = Node()
            dummyNode.value = u
            new_attrs = attrs.copy()
            new_attrs.remove(max_feat)
            child = ID3(subdata, new_attrs)
            dummyNode.children.append(child)
            root.children.append(dummyNode)
    return root

def printTree(root: Node, depth=0):
    for i in range(depth):
        print("\t", end="")
    print(root.value, end="")
    if root.isLeaf:
        print(" -> ", root.pred)
    print()
    for child in root.children:
        printTree(child, depth + 1)

root = ID3(data, features)
printTree(root)
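The listing assumes 3-dataset.csv has categorical feature columns plus a yes/no target column named answer. If the file isn't to hand, a tiny stand-in (column names and rows invented here, in the spirit of the classic play-tennis data) exercises the same code path:

# Hedged stand-in for 3-dataset.csv: values below are illustrative only.
toy = pd.DataFrame({
    "outlook": ["sunny", "sunny", "overcast", "rainy", "rainy"],
    "windy":   ["no", "yes", "no", "no", "yes"],
    "answer":  ["no", "no", "yes", "yes", "no"],
})
toy_features = [feat for feat in toy]
toy_features.remove("answer")
printTree(ID3(toy, toy_features))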









8) Naive Bayes Algorithm
from sklearn.datasets import load_iris
iris=load_iris()

X=iris.data
y=iris.target

from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.4,random_state=1)

from sklearn.naive_bayes import GaussianNB
gnb=GaussianNB()
gnb.fit(X_train,y_train)
y_pred=gnb.predict(X_test)

from sklearn import metrics
print("Gaussian Naive Bayes model accuracy(in %):",metrics.accuracy_score(y_test,y_pred)*100)



9) Principal Component Analysis
import numpy as np
from numpy import array
from numpy import mean
from numpy import cov
from numpy.linalg import eig

A=array([[2,3],[5,6],[8,9]])
print(A)
M=mean(A.T,axis=1)
C=A-M
print(M)
print(C)
V=cov(C.T)
print(V)
values,vectors=eig(V)
print(vectors)
print(values)
P=vectors.T.dot(C.T)
print(P.T)
print(cov(P))
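As a cross-check (not in the original listing), sklearn's PCA on the same matrix should recover the same principal directions and projections, up to a sign flip per component:

# Hedged cross-check against the eigendecomposition above.
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
print(pca.fit_transform(A))  # compare with P.T, possibly sign-flipped
print(pca.components_)       # rows correspond to the eigenvectors above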








10) Gray-Level Co-Occurrence Matrix (GLCM)
from skimage.feature import greycomatrix  # renamed graycomatrix in scikit-image >= 0.19
import numpy as np
image = np.array([[0, 0, 1, 1],
                  [0, 0, 1, 1],
                  [0, 2, 2, 2],
                  [2, 2, 3, 3]], dtype=np.uint8)

# One-pixel distance, four directions (0, 45, 90, 135 degrees), 4 gray levels
result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4],
                      levels=4, symmetric=True, normed=True)
print(result)
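The raw matrix is rarely the end product; standard texture descriptors are usually computed from it. A short follow-on, assuming the same scikit-image version as above:

# Summarise the GLCM with Haralick-style texture properties.
from skimage.feature import greycoprops  # graycoprops in scikit-image >= 0.19
for prop in ['contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation']:
    print(prop, greycoprops(result, prop))  # one value per (distance, angle) pair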







11) Clustering Algorithm
from sklearn.cluster import KMeans
import numpy as np
X=np.array([[1,2],[1,4],[2,0],[10,4],[11,2],[8,6]])
kmeans=KMeans(n_clusters=2,random_state=1)
kmeans.fit(X)
print(kmeans.labels_)
print(kmeans.predict([[1,2]]))
print(kmeans.predict([[10,1]]))
print(kmeans.cluster_centers_)
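n_clusters=2 is assumed because the six points visibly split into two groups; an optional elbow check over the same X makes the choice explicit by watching the inertia flatten:

# Optional: within-cluster sum of squares for several candidate k values.
for k in range(1, 6):
    km = KMeans(n_clusters=k, random_state=1).fit(X)
    print(k, km.inertia_)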









12) Support Vector Machines
from sklearn.datasets import load_iris
iris=load_iris()
X=iris.data
y=iris.target
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.4,random_state=1)
from sklearn.svm import SVC
model=SVC(kernel='linear')
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
from sklearn.metrics import accuracy_score
print('accuracy- ',accuracy_score(y_test,y_pred))
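kernel='linear' is one choice among several built-in kernels; an optional comparison on the identical split (not in the original listing) shows how the alternatives fare:

# Optional: accuracy of each built-in kernel on the same train/test split.
for kernel in ['linear', 'poly', 'rbf']:
    clf = SVC(kernel=kernel).fit(X_train, y_train)
    print(kernel, accuracy_score(y_test, clf.predict(X_test)))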









13) Building A Simple Neural Network

from sklearn.datasets import load_iris
iris=load_iris()
x=iris.data
y=iris.target
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler(feature_range=(0,1))
rescaledx=scaler.fit_transform(x)
from keras.models import Sequential
from keras.layers import Dense
model=Sequential()
model.add(Dense(8,input_dim=4,activation='relu'))
model.add(Dense(7,activation='relu'))
model.add(Dense(7,activation='relu'))
# Iris has 3 classes, so the output layer needs 3 softmax units;
# sparse_categorical_crossentropy matches the integer labels in y.
model.add(Dense(3,activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(rescaledx,y,epochs=100,batch_size=10)
_,accuracy=model.evaluate(rescaledx,y)
print(accuracy)
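After training, the predicted class for a sample is the argmax of the softmax output. A small usage sketch on the first rescaled flower:

# Predicted class probabilities for one sample, then the most likely class.
import numpy as np
probs = model.predict(rescaledx[:1])
print(probs, np.argmax(probs, axis=1))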






14) Importance of Data Pre-Processing in Machine Learning
import pandas as pd
import scipy
import numpy as np
from sklearn.preprocessing import MinMaxScaler
names=['preg','plas','pres','skin','test','mass','pedi','age','class']
data=pd.read_csv('pima-indians-diabetes.data.csv',header=None,names=names)
array=data.values
X=array[:,0:8]
Y=array[:,8]
scaler=MinMaxScaler(feature_range=(0,1))
rescaledX=scaler.fit_transform(X)
np.set_printoptions(precision=3)
print(rescaledX[0:10,:])
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(12,input_dim=8,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(X,Y,epochs=200,batch_size=10)
loss,accuracy=model.evaluate(X,Y)
print('Accuracy: %.2f' % (accuracy*100))
from numpy import array
Xp = array([[6.0, 158.0, 72.0, 35.0, 0.0, 55.5, 0.627, 50.0]])
Yp = model.predict(Xp)
print(Xp[0],Yp[0])
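Note the model above is fit on the raw X, while rescaledX is only printed. To see the effect the section title points at, the same architecture can be retrained on the scaled features (a sketch; any new sample such as Xp must then pass through scaler.transform first):

# Hedged sketch: identical architecture, trained on the scaled inputs instead.
model2 = Sequential()
model2.add(Dense(12, input_dim=8, activation='relu'))
model2.add(Dense(8, activation='relu'))
model2.add(Dense(1, activation='sigmoid'))
model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model2.fit(rescaledX, Y, epochs=200, batch_size=10, verbose=0)
print('Accuracy (scaled inputs): %.2f' % (model2.evaluate(rescaledX, Y)[1]*100))
print(model2.predict(scaler.transform(Xp)))  # scale new samples the same way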










15) Loading Dataset in Machine Learning
1)
import csv
import numpy as np

path=r"/content/diabetes (1).csv"
with open(path,'r') as f:
    reader = csv.reader(f,delimiter = ",")
    headers = next(reader)
    data = list(reader)
data = np.array(data).astype(float)
data
2)
from numpy import loadtxt
from numpy import genfromtxt
path = r"/content/Iris.csv"
datapath= open(path,'r')
data = genfromtxt(datapath, delimiter=',', skip_header=1)  # CSV needs an explicit delimiter; skip_header assumes a header row, and non-numeric columns load as nan
print(data.shape)
print(data[:3])
3)
from pandas import read_csv
path = r"C:\pima-indians-diabetes.csv"
headernames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(path, names=headernames)
print(data.shape)
print(data[:3])




