Notes — What is notes.io?

Notes brand slogan

Notes - notes.io

import csv


def find_s(rows):
    """Run the Find-S algorithm over training rows.

    Each row is a list of attribute values whose last element is the target
    label ("TRUE" marks a positive example).  Returns the maximally specific
    hypothesis wrapped in a list, matching the original output shape.
    """
    h = [['0', '0', '0', '0', '0', '0']]
    for row in rows:
        print(row)
        if row[-1] == "TRUE":
            j = 0
            for value in row:
                # Skip the target label itself; only attributes update h.
                if value != "TRUE":
                    # '0' means "nothing seen yet": adopt the first value.
                    if value != h[0][j] and h[0][j] == '0':
                        h[0][j] = value
                    # A conflicting value generalises this attribute to '?'.
                    elif value != h[0][j] and h[0][j] != '0':
                        h[0][j] = '?'
                    j = j + 1
    return h


if __name__ == "__main__":
    with open('findsdataset.csv', 'r') as f:
        your_list = list(csv.reader(f))
    h = find_s(your_list)
    print("A Maximally Specific hypothesis is")
    print(h)
______________________________________________________________________
2.
import numpy as np
import pandas as pd

# Load the training data for candidate elimination.
# FIX: the pasted source had lost the backslashes in every '\n' escape.
data = pd.read_csv('findsdataset.csv')
data = pd.DataFrame(data)
print('The Dataset is: \n')
print(data)

# All columns but the last are attribute values (concepts).
concepts = np.array(data.iloc[:, 0:-1])
print('\n The Concepts are: \n', concepts)

# The last column holds the Yes/No target labels.
target = np.array(data.iloc[:, -1])
print('\nThe target is: \n', target)

def learn(concepts, target):
    """Candidate-Elimination: return (specific_h, general_h).

    concepts : 2-D array of attribute rows.
    target   : 1-D array of "Yes"/"No" labels aligned with concepts.
    """
    # Start from the first example as the most specific hypothesis.
    specific_h = concepts[0].copy()
    # Most general boundary: one all-'?' hypothesis per attribute.
    general_h = [["?" for _ in range(len(specific_h))] for _ in range(len(specific_h))]
    print(general_h)
    for i, h in enumerate(concepts):
        print(i, h)
        if target[i] == "Yes":
            # Positive example: generalise specific_h where it disagrees.
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        if target[i] == "No":
            # Negative example: specialise general_h on disagreeing attributes.
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
    return specific_h, general_h

# Run candidate elimination and report both hypothesis boundaries.
# FIX: the pasted source had lost the backslashes in the '\n' escapes.
s_final, g_final = learn(concepts, target)
print("\n\nFinal S:", s_final)

print("\n\nFinal G:")
for row in g_final:
    print(row)
_____________________________________________________________________________
4.
import numpy as np
# Example training inputs: 3 samples x 2 features.
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
# Target values, one per sample (3 x 1).
y = np.array(([92], [86], [89]), dtype=float)
# Scale each feature column by its maximum, and targets into [0, 1].
X = X/np.max(X,axis=0) # maximum of X array longitudinally
y = y/100
def sigmoid(x):
    """Logistic activation: maps any real x into (0, 1)."""
    return 1 / (1 + np.exp(-x))
def derivatives_sigmoid(x):
    """Derivative of the sigmoid, expressed in terms of its output value x."""
    return x * (1 - x)
# Variable initialization
epoch = 7000             # number of training iterations
lr = 0.1                 # learning rate
inputlayer_neurons = 2   # number of features in data set
hiddenlayer_neurons = 3  # number of hidden layer neurons
output_neurons = 1       # number of neurons at output layer

# Weight and bias initialization: uniform random values of dim rows x cols.
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # Forward propagation
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # Backpropagation
    EO = y - output                               # output-layer error
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)                     # error propagated back to hidden layer
    hiddengrad = derivatives_sigmoid(hlayer_act)  # how much hidden layer wts contributed to error
    d_hiddenlayer = EH * hiddengrad

    # Weight updates: dot product of next-layer error and current-layer output.
    wout += hlayer_act.T.dot(d_output) * lr
    # NOTE(review): the bias updates were commented out in the original; kept disabled.
    # bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    # bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

# FIX: the pasted source had lost the backslashes in the '\n' escapes.
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n", output)

________________________________________________________________________________________________________________________________________
5.
#5.Naive Bayesian Classifier
import pandas as pd
import numpy as np
import csv

# Training data; the 'class' column holds the Yes/No label.
data1 = pd.read_csv('data5.csv')

# Partition the training data by class to compute per-class likelihoods later.
df1 = data1[data1['class'] == 'Yes']
df2 = data1[data1['class'] == 'No']

# inputlist1 accumulates the header row followed by the test instances.
inputlist1=[]

head=['Outlook','Temperature','Humidity','Wind','class']
inputlist1.append(head)
# Running count of correctly classified test rows.
count=0

def counting(inputlist1, j, str1, count):
    """Return count, incremented by one if row j's label (column 4) equals str1."""
    if inputlist1[j][4] == str1:
        count = count + 1
    return count

print("ntt Naive Bayesian Classifier")
with open('PT.csv','r') as csv_file1:
csv_reader1=csv.reader(csv_file1)
for i in range(4):
next(csv_reader1)

for line1 in csv_reader1:
inputlist1.append(line1)

for j in range(1,len(inputlist1)):
print("nThe ",j,"Test data is:n",head[0]," = ",inputlist1[j][0],", ",head[1]," = ",inputlist1[j][1],", ",head[2]," = ",inputlist1[j][2],", ",head[3]," = ",inputlist1[j][3])
listyes=list()
listno=list()
resultyes=0.0
resultno=0.0

for d in range(4):
listyes.append(df1.loc[df1[head[d]]==inputlist1[j][d],head[d]].count()/len(df1))
listno.append(df2.loc[df2[head[d]]==inputlist1[j][d],head[d]].count()/len(df2))

resultyes = np.prod(np.array(listyes))*(len(df1)/len(data1))
resultno = np.prod(np.array(listno))*(len(df2)/len(data1))
print("Probability of Yes: ",resultyes,"nProbability of No: ",resultno)

if resultyes>resultno:
print("Classified as YESn")
count=counting(inputlist1,j,'Yes',count)
else:
print("Classified as NOn")
count=counting(inputlist1,j,'No',count)

print("nAccuracy of the Classifier is: ",count/(len(inputlist1)-1) )
___________________________________________________________________________________________________
6.
import pandas as pd

# Each line of the file is "message,label" with label pos/neg.
msg = pd.read_csv('naivetext1.txt', names=['message', 'label'])
print('The dimensions of the dataset', msg.shape)
# Map the text labels to numeric classes: pos -> 1, neg -> 0.
msg['labelnum'] = msg.label.map({'pos': 1, 'neg': 0})
X = msg.message
y = msg.labelnum
print(X)
print(y)

#splitting the dataset into train and test data
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(X, y)
print(xtest.shape)
print(xtrain.shape)
print(ytest.shape)
print(ytrain.shape)

print(xtest)
print(xtrain)
print(ytest)
print(ytrain)

#output of count vectoriser is a sparse matrix
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain)
xtest_dtm = count_vect.transform(xtest)
# BUG FIX: print() returns None, so the original `gfn = print(...)` left
# gfn = None and broke DataFrame(columns=gfn) below.
# NOTE(review): sklearn >= 1.2 renames this to get_feature_names_out().
gfn = count_vect.get_feature_names()
print(gfn)

df = pd.DataFrame(xtrain_dtm.toarray(), columns=gfn)
print(df)  # tabular representation
#print(xtrain_dtm) #sparse matrix representation

# Training Naive Bayes (NB) classifier on training data.
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(xtrain_dtm, ytrain)
predicted = clf.predict(xtest_dtm)

#printing accuracy metrics
from sklearn import metrics
print('Accuracy metrics')
print('Accuracy of the classifer is', metrics.accuracy_score(ytest, predicted))
print('Confusion matrix')
print(metrics.confusion_matrix(ytest, predicted))
print('Recall and Precison ')
print(metrics.recall_score(ytest, predicted))
print(metrics.precision_score(ytest, predicted))

'''docs_new = ['I like this place', 'My boss is not my saviour']
X_new_counts = count_vect.transform(docs_new)
predictednew = clf.predict(X_new_counts)
for doc, category in zip(docs_new, predictednew):
    print('%s->%s' % (doc, msg.labelnum[category]))'''

________________________________________________________________________________________________________
7.
import bayespy as bp
import random
import numpy as np
import csv

# Enumerations mapping each CSV attribute value to an integer code.
ageEnum = {'SuperSeniorCitizen': 0, 'SeniorCitizen': 1, 'MiddleAged': 2, 'Youth': 3, 'Teen': 4}
genderEnum = {'Male': 0, 'Female': 1}
familyHistoryEnum = {'Yes': 0, 'No': 1}
dietEnum = {'High': 0, 'Medium': 1, 'Low': 2}
lifeStyleEnum = {'Athlete': 0, 'Active': 1, 'Moderate': 2, 'Sedetary': 3}
cholestrolEnum = {'High': 0, 'BorderLine': 1, 'Normal': 2}
heartDiseaseEnum = {'Yes': 0, 'No': 1}

# Encode every CSV row as a vector of integer codes.
with open('heart_disease_data.csv') as csvfile:
    lines = csv.reader(csvfile)
    dataset = list(lines)
    data = []
    for x in dataset:
        data.append([ageEnum[x[0]], genderEnum[x[1]], familyHistoryEnum[x[2]],
                     dietEnum[x[3]], lifeStyleEnum[x[4]], cholestrolEnum[x[5]],
                     heartDiseaseEnum[x[6]]])

data = np.array(data)
N = len(data)

# One Dirichlet prior plus an observed Categorical node per attribute.
p_age = bp.nodes.Dirichlet(1.0 * np.ones(5))
age = bp.nodes.Categorical(p_age, plates=(N,))
age.observe(data[:, 0])

p_gender = bp.nodes.Dirichlet(1.0 * np.ones(2))
gender = bp.nodes.Categorical(p_gender, plates=(N,))
gender.observe(data[:, 1])

p_familyhistory = bp.nodes.Dirichlet(1.0 * np.ones(2))
familyhistory = bp.nodes.Categorical(p_familyhistory, plates=(N,))
familyhistory.observe(data[:, 2])

p_diet = bp.nodes.Dirichlet(1.0 * np.ones(3))
diet = bp.nodes.Categorical(p_diet, plates=(N,))
diet.observe(data[:, 3])

p_lifestyle = bp.nodes.Dirichlet(1.0 * np.ones(4))
lifestyle = bp.nodes.Categorical(p_lifestyle, plates=(N,))
lifestyle.observe(data[:, 4])

p_cholesterol = bp.nodes.Dirichlet(1.0 * np.ones(3))
cholesterol = bp.nodes.Categorical(p_cholesterol, plates=(N,))
cholesterol.observe(data[:, 5])

# Conditional table of heart disease given all six parent attributes.
p_heartdisease = bp.nodes.Dirichlet(np.ones(2), plates=(5, 2, 2, 3, 4, 3))
heartdisease = bp.nodes.MultiMixture([age, gender, familyhistory, diet, lifestyle, cholesterol],
                                     bp.nodes.Categorical, p_heartdisease)
heartdisease.observe(data[:, 6])
p_heartdisease.update()

# Interactive query loop: collect evidence, report P(HeartDisease = Yes).
# FIX: restored the stripped '\n' escape in print("\n").
m = 0
while m == 0:
    print("\n")
    a = int(input('Enter Age : ' + str(ageEnum)))
    g = int(input('Enter Gender : ' + str(genderEnum)))
    fh = int(input('Enter Family History : ' + str(familyHistoryEnum)))
    d = int(input('Enter Diet : ' + str(dietEnum)))
    ls = int(input('Enter Lifestyle : ' + str(lifeStyleEnum)))
    c = int(input('Enter Cholesterol : ' + str(cholestrolEnum)))
    res = bp.nodes.MultiMixture([a, g, fh, d, ls, c], bp.nodes.Categorical, p_heartdisease)
    r1 = res.get_moments()[0][heartDiseaseEnum['Yes']]
    print("Probability(HeartDisease) = " + str(r1))
    m = int(input("Enter for Continue : 0, Exit : 1"))
________________________________________________________________________________________________________________________________
8.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
# Two-column (V1, V2) point cloud to be clustered.
data = pd.read_csv('xclara.csv')
print("Input Data and Shape")
print(data.shape)
f1 = data['V1'].values
f2 = data['V2'].values
# Pair the two feature columns into an (n, 2) array.
X = np.array(list(zip(f1, f2)))
print('Graph for whole dataset')
plt.scatter(f1, f2, c='black', s=7)
plt.show()
print('Graph using Kmeans Algorithm')
# Hard clustering into 3 groups; points coloured by assigned cluster,
# centroids marked with stars.
kmeans = KMeans(3)
labels = kmeans.fit(X).predict(X)
centroids = kmeans.cluster_centers_
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis')
plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=200, c='black')
plt.show()
print('Graph using EM Algorithm')
# Soft (EM) clustering with a 3-component Gaussian mixture.
gmm = GaussianMixture(3)
labels = gmm.fit(X).predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=10, cmap='viridis')
plt.show()
_________________________________________________________________________________________________________________
9.
# k-nearest-neighbour classification of the iris dataset with a held-out split.
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import datasets

bunch = datasets.load_iris()
features, labels = bunch.data, bunch.target
print(features)
print(labels)

# Random hold-out split of the samples and their labels.
x_train, x_test, y_train, y_test = train_test_split(features, labels)

# Fit a 5-neighbour model and predict the held-out samples.
knn = KNeighborsClassifier(5)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)

print('confusion matrix is as follows')
print(confusion_matrix(y_test, y_pred))
print('Accuracy metrics')
print(classification_report(y_test, y_pred))
_______________________________________________________________________________________________________
10.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np1

def kernel(point, xmat, k):
    """Return the diagonal LWR weight matrix for one query point.

    point : 1 x n query row (np1.mat).
    xmat  : m x n design matrix (np1.mat).
    k     : Gaussian bandwidth; larger k gives a smoother fit.
    """
    m, n = np1.shape(xmat)
    weights = np1.mat(np1.eye(m))
    for j in range(m):
        # BUG FIX: the original read the global X instead of the xmat parameter.
        diff = point - xmat[j]
        weights[j, j] = np1.exp(diff * diff.T / (-2.0 * k ** 2))
    return weights

def localWeight(point, xmat, ymat, k):
    """Solve the weighted least-squares normal equations for one query point."""
    wei = kernel(point, xmat, k)
    # BUG FIX: the original used the global X instead of the xmat parameter.
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W

def localWeightRegression(xmat, ymat, k):
    """Predict a response for every row of xmat via locally weighted regression."""
    m, n = np1.shape(xmat)
    ypred = np1.zeros(m)
    for i in range(m):
        # Each prediction refits a local model centred on row i.
        ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
    return ypred

# load data points
data = pd.read_csv('data10.csv')
bill = np1.array(data.total_bill)
tip = np1.array(data.tip)

#preparing and add 1 in bill
# Build the design matrix X = [1, bill] so each local fit has an intercept.
mbill = np1.mat(bill)
mtip = np1.mat(tip)
m= np1.shape(mbill)[1]
one = np1.mat(np1.ones(m))
X= np1.hstack((one.T,mbill.T))


#set k here
# Bandwidth k = 2 controls how local each weighted fit is.
ypred = localWeightRegression(X,mtip,2)
# Sort by bill amount so the fitted curve plots left-to-right.
SortIndex = X[:,1].argsort(0)
xsort = X[SortIndex][:,0]



fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(bill,tip, c='green')
ax.plot(xsort[:,1],ypred[SortIndex], c = 'red', linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show();
     
 
what is notes.io
 

Notes.io is a web-based application for taking notes. You can take your notes and share with others people. If you like taking long notes, notes.io is designed for you. To date, over 8,000,000,000 notes created and continuing...

With notes.io;

  • * You can take a note from anywhere and any device with internet connection.
  • * You can share the notes in social platforms (YouTube, Facebook, Twitter, instagram etc.).
  • * You can quickly share your contents without website, blog and e-mail.
  • * You don't need to create any Account to share a note. As you wish you can use quick, easy and best shortened notes with sms, websites, e-mail, or messaging services (WhatsApp, iMessage, Telegram, Signal).
  • * Notes.io has fabulous infrastructure design for a short link and allows you to share the note as an easy and understandable link.

Fast: Notes.io is built for speed and performance. You can take notes quickly and browse your archive.

Easy: Notes.io doesn’t require installation. Just write and share note!

Short: Notes.io’s url just 8 character. You’ll get shorten link of your note when you want to share. (Ex: notes.io/q )

Free: Notes.io has been running for 12 years and has been free since the day it started.


You immediately create your first note and start sharing with the ones you wish. If you want to contact us, you can use the following communication channels;


Email: [email protected]

Twitter: http://twitter.com/notesio

Instagram: http://instagram.com/notes.io

Facebook: http://facebook.com/notesio



Regards;
Notes.io Team

     
 
Shortened Note Link
 
 
Loading Image
 
     
 
Long File
 
 

Written notes larger than 18KB cannot be shortened.

To be smaller than 18KB, please organize your notes, or sign in.