9. Locally Weighted Regression on the tips dataset
from numpy import *
import matplotlib.pyplot as plt
import pandas as pd

def kernel(point, xmat, k):
    # Build a diagonal matrix of Gaussian weights: points near 'point' weigh close to 1
    m, n = shape(xmat)
    weights = mat(eye(m))
    for j in range(m):
        diff = point - xmat[j]
        weights[j, j] = exp(diff * diff.T / (-2.0 * k ** 2))
    return weights

def localWeight(point, xmat, ymat, k):
    # Solve the weighted normal equations for the local fit at 'point'
    wei = kernel(point, xmat, k)
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W

def localWeightRegression(xmat, ymat, k):
    # Fit a separate locally weighted model at every training point
    m, n = shape(xmat)
    ypred = zeros(m)
    for i in range(m):
        ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
    return ypred

data = pd.read_csv('tips.csv')
bill = array(data.total_bill)
tip = array(data.tip)
mbill = mat(bill)
mtip = mat(tip)
m = shape(mbill)[1]
one = mat(ones(m))
X = hstack((one.T, mbill.T))  # prepend a column of ones as the bias term

# set k here
ypred = localWeightRegression(X, mtip, 10)
SortIndex = X[:, 1].argsort(0)
xsort = X[SortIndex][:, 0]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(bill, tip, color='green')
ax.plot(xsort[:, 1], ypred[SortIndex], color='red', linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show()
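For reference, the three functions above implement the usual locally weighted regression fit at a query point x with bandwidth k:

\[ w_{jj} = \exp\!\left(-\frac{(x - x_j)(x - x_j)^{\top}}{2k^{2}}\right), \qquad \hat{\beta}(x) = (X^{\top} W X)^{-1} X^{\top} W y, \qquad \hat{y}(x) = x\,\hat{\beta}(x) \]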

8. k-Nearest Neighbour classification of the Iris dataset
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.model_selection import train_test_split

iris_dataset = load_iris()
print("n IRIS FEATURES TARGET NAMES: n ", iris_dataset.target_names)
for i in range(len(iris_dataset.target_names)):
print("n[{0}]:[{1}]".format(i, iris_dataset.target_names[i]))
print("n IRIS DATA :n", iris_dataset["data"])
X_train, X_test, y_train, y_test = train_test_split(iris_dataset["data"],
iris_dataset["target"], random_state=0)
print("n Target :n", iris_dataset["target"])
print("n X TRAIN n", X_train)
print("n X TEST n", X_test)
print("n Y TRAIN n", y_train)
print("n Y TEST n", y_test)

kn = KNeighborsClassifier(n_neighbors=1)
kn.fit(X_train, y_train)

for i in range(len(X_test)):
    x_new = np.array([X_test[i]])
    prediction = kn.predict(x_new)
    print("Actual: {0} {1}, Predicted: {2} {3}".format(
        y_test[i], iris_dataset["target_names"][y_test[i]],
        prediction, iris_dataset["target_names"][prediction]
    ))

print("\nTEST SCORE [ACCURACY]: {:.2f}\n".format(kn.score(X_test, y_test)))

7. Clustering with K-Means and EM (Gaussian Mixture) on ex.csv
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans

data = pd.read_csv('ex.csv')
print("Input Data and Shape")
print(data.shape)
print(data.head())
f1 = data['V1'].values
print("f1")
print(f1)
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
print("x")
print(X)
print('Graph for the whole dataset')
plt.scatter(f1, f2, c='black', s=600)
plt.show()

kmeans = KMeans(2, random_state=0)
labels = kmeans.fit(X).predict(X)
print("labels")
print(labels)
centroids = kmeans.cluster_centers_
print("centroids")
print(centroids)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40)
print('Graph using Kmeans Algorithm')
plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=200, c='#050505')
plt.show()

gmm = GaussianMixture(n_components=2).fit(X)
labels = gmm.predict(X)
print("lLABELS GMM")
print(labels)
probs = gmm.predict_proba(X)
size = 10 * probs.max(1) ** 3
print('Graph using EM Algorithm')
plt.scatter(X[:, 0], X[:, 1], c=labels, s=size, cmap='viridis')
plt.show()
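If you want a number to compare the two clusterings rather than eyeballing the plots, a silhouette score (higher is better) is one option. This is only a sketch; it assumes the kmeans and gmm objects above are still in scope:

from sklearn.metrics import silhouette_score

print("Silhouette, K-Means:", silhouette_score(X, kmeans.labels_))
print("Silhouette, GMM:    ", silhouette_score(X, gmm.predict(X)))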

6. Naive Bayesian classifier on the mushroom dataset
import numpy as np
import pandas as pd

mush = pd.read_csv("mushroom.csv")
mush.replace('?', np.nan, inplace=True)

print(len(mush.columns), "columns, after dropping NA,", len(mush.dropna(axis=1).columns))
mush.dropna(axis=1, inplace=True)

target = 'class'
features = mush.columns[mush.columns != target]
classes = mush[target].unique()
test = mush.sample(frac=0.3)
mush = mush.drop(test.index)

probs = {}
probcl = {}

# Estimate P(value | class) for every feature, and the class priors, from the training rows
for x in classes:
    mushcl = mush[mush[target] == x][features]
    clsp = {}
    tot = len(mushcl)

    for col in mushcl.columns:
        colp = {}
        for val, cnt in mushcl[col].value_counts().items():
            colp[val] = cnt / tot
        clsp[col] = colp

    probs[x] = clsp
    probcl[x] = len(mushcl) / len(mush)


def probabs(x):
    # Return the (unnormalised) posterior probability of each class for one row
    if not isinstance(x, pd.Series):
        raise TypeError("Arg must be of type Series")

    probab = {}
    for cl in classes:
        pr = probcl[cl]
        for col, val in x.items():
            try:
                pr *= probs[cl][col][val]
            except KeyError:
                pr = 0
        probab[cl] = pr
    return probab


def classify(x):
    # Pick the class with the highest posterior probability
    probab = probabs(x)
    mx = 0
    mxcl = ''
    for cl, pr in probab.items():
        if pr > mx:
            mx = pr
            mxcl = cl
    return mxcl


# Accuracy on the training rows
b = []
for i in mush.index:
    b.append(classify(mush.loc[i, features]) == mush.loc[i, target])

print(sum(b), "correct of", len(mush))
print("Accuracy:", sum(b) / len(mush))

5. Backpropagation neural network on a toy dataset
import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)

X = X / np.amax(X, axis=0)  # scale each feature to [0, 1]
y = y / 100                 # scale the target scores to [0, 1]

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def derivatives_sigmoid(x):
    # derivative of the sigmoid, expressed in terms of its output
    return x * (1 - x)

epoch = 7000
lr = 0.1
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # Forward propagation
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)

    # Backpropagation
    EO = y - output
    d_output = EO * derivatives_sigmoid(output)
    EH = d_output.dot(wout.T)
    d_hiddenlayer = EH * derivatives_sigmoid(hlayer_act)

    # Update weights and biases
    wout += hlayer_act.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

print("Input: n" + str(X))
print("Actual Output: n" + str(y))
print("Predicted Output: n", output)

4. ID3 decision tree on the play-tennis dataset
import pandas as pd
import math
import numpy as np

data = pd.read_csv("tennis.csv")
features = [feat for feat in data]
features.remove("answer")


class Node:
    def __init__(self):
        self.children = []
        self.value = ""
        self.isLeaf = False
        self.pred = ""


def entropy(examples):
    pos = 0.0
    neg = 0.0
    for _, row in examples.iterrows():
        if row["answer"] == "yes":
            pos += 1
        else:
            neg += 1
    if pos == 0.0 or neg == 0.0:
        return 0.0
    p = pos / (pos + neg)
    n = neg / (pos + neg)
    return -(p * math.log(p, 2) + n * math.log(n, 2))


def info_gain(examples, attr):
    uniq = np.unique(examples[attr])
    gain = entropy(examples)
    for u in uniq:
        subdata = examples[examples[attr] == u]
        gain -= (float(len(subdata)) / float(len(examples))) * entropy(subdata)
    return gain


def ID3(examples, attrs):
    root = Node()
    # Split on the attribute with the highest information gain
    max_gain = 0
    max_feat = ""
    for feature in attrs:
        gain = info_gain(examples, feature)
        if gain > max_gain:
            max_gain = gain
            max_feat = feature
    root.value = max_feat
    uniq = np.unique(examples[max_feat])
    for u in uniq:
        subdata = examples[examples[max_feat] == u]
        if entropy(subdata) == 0.0:
            # Pure subset: create a leaf holding the predicted label
            newNode = Node()
            newNode.isLeaf = True
            newNode.value = u
            newNode.pred = np.unique(subdata["answer"])
            root.children.append(newNode)
        else:
            # Mixed subset: recurse on the remaining attributes
            dummyNode = Node()
            dummyNode.value = u
            new_attrs = attrs.copy()
            new_attrs.remove(max_feat)
            dummyNode.children.append(ID3(subdata, new_attrs))
            root.children.append(dummyNode)
    return root


def printTree(root: Node, depth=0):
    print("\t" * depth, end="")
    print(root.value, end="")
    if root.isLeaf:
        print(" -> ", root.pred)
    print()
    for child in root.children:
        printTree(child, depth + 1)


def classify(root: Node, new):
    for child in root.children:
        if child.value == new[root.value]:
            if child.isLeaf:
                print("Predicted label for new example", new, "is:", child.pred)
                return
            else:
                classify(child.children[0], new)


root = ID3(data, features)
print("Decision Tree is:")
printTree(root)
print("------------------")
new = {"outlook": "sunny", "temperature": "hot", "humidity": "normal", "wind":
"strong"}
classify(root, new)
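For reference, entropy() and info_gain() above compute the standard ID3 quantities over an example set S, where p and n are the fractions of yes and no examples:

\[ H(S) = -p\log_2 p - n\log_2 n, \qquad \mathrm{Gain}(S, A) = H(S) - \sum_{v \in \mathrm{values}(A)} \frac{|S_v|}{|S|}\, H(S_v) \]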

3. Candidate Elimination algorithm on finds.csv
import numpy as np
import pandas as pd

data = pd.read_csv('finds.csv')
concepts = np.array(data.iloc[:, 0:-1])  # every column except the last is an attribute
target = np.array(data.iloc[:, -1])      # the last column holds the Yes/No label


def learn(concepts, target):
    # S starts as the first training example; G starts fully general
    specific_h = concepts[0].copy()
    general_h = [["?" for i in range(len(specific_h))] for i in range(len(specific_h))]

    for i, h in enumerate(concepts):
        if target[i] == "Yes":
            # Positive example: generalise S wherever it disagrees
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        if target[i] == "No":
            # Negative example: specialise G against S
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'

    # Drop the hypotheses in G that remained fully general
    general_h = [val for val in general_h if val != ['?'] * len(specific_h)]

    return specific_h, general_h


s_final, g_final = learn(concepts, target)
print("Final S:", s_final, sep="\n")
print("Final G:", g_final, sep="\n")

2. AO* search on an AND-OR graph
class Graph:
    def __init__(self, adjac_lis):
        self.adjac_lis = adjac_lis
        # Alternate heuristic table matching the larger example graph (nodeList1 below)
        self.H1 = {
            'A': 1, 'B': 6, 'C': 2, 'D': 2, 'E': 2, 'F': 1,
            'G': 5, 'H': 7, 'I': 7, 'J': 1, 'T': 3
        }
        # Heuristic table used by the algorithm; values are revised as nodes are expanded
        self.H = {
            'A': 1, 'B': 6, 'C': 12, 'D': 10,
            'E': 4, 'F': 4, 'G': 5, 'H': 7
        }
        self.parent = {}
        self.openList = set()
        self.solutionGraph = {}
        self.solvedNodeList = set()

    def get_neighbors(self, v):
        return self.adjac_lis.get(v, [])

    def updateNode(self, v):
        # Revise v's cost from its cheapest feasible child group and record that group
        if v in self.solvedNodeList:
            return

        minimumCost = None
        minimumCostFeasibleChildNodesDict = {}

        print("CURRENT PROCESSING NODE:", v)
        print("___________________________")

        for (c, weight) in self.get_neighbors(v):
            # Each OR child starts its own candidate group; AND partners are added below.
            # Every arc is charged a unit cost on top of the child's heuristic value.
            feasibleChildNodeList = [c]
            cost = self.getHeuristicNodeValue(c) + 1

            for nodeTuple in self.getAndNodes(v):
                if c in nodeTuple:
                    for andNode in nodeTuple:
                        if andNode != c:
                            feasibleChildNodeList.append(andNode)
                            cost = cost + self.getHeuristicNodeValue(andNode) + 1

            if minimumCost is None or cost < minimumCost:
                minimumCost = cost
                for child in feasibleChildNodeList:
                    self.parent[child] = v
                minimumCostFeasibleChildNodesDict[minimumCost] = feasibleChildNodeList

        if minimumCost is None:
            # No successors: v is a leaf and is solved with its own heuristic value
            minimumCost = self.getHeuristicNodeValue(v)
            self.solvedNodeList.add(v)
        else:
            self.setHeuristicNodeValue(v, minimumCost)
            for child in minimumCostFeasibleChildNodesDict[minimumCost]:
                if child not in self.solvedNodeList:
                    self.openList.add(child)
            self.solutionGraph[v] = minimumCostFeasibleChildNodesDict[minimumCost]

        # v is solved once every node in its chosen child group is solved
        solved = all(c in self.solvedNodeList for c in self.solutionGraph.get(v, []))
        if solved:
            self.solvedNodeList.add(v)

        print("HEURISTIC VALUES :", self.H)
        print("OPEN LIST :", list(self.openList))
        print("MINIMUM COST NODES:", minimumCostFeasibleChildNodesDict.get(minimumCost, "[ ]"))
        print("SOLVED NODE LIST :", list(self.solvedNodeList))
        print("-" * 89)

    def getAndNodes(self, v):
        # Hard-coded AND arcs: these child pairs must be solved together
        andNodes = {
            'A': [('B', 'C')],
            'D': [('E', 'F')]
        }
        return andNodes.get(v, [])

    def getHeuristicNodeValue(self, n):
        return self.H.get(n, 0)

    def setHeuristicNodeValue(self, n, value):
        self.H[n] = value

    def ao_star_algorithm(self, start):
        self.openList = set([start])

        while len(self.openList) > 0:
            v = self.openList.pop()
            self.updateNode(v)

            # Propagate revised costs back up towards the start node
            while v != start and self.parent[v] not in self.solvedNodeList:
                parent = self.parent[v]
                self.updateNode(parent)
                v = parent

        print("TRAVERSE SOLUTION FROM ROOT TO COMPUTE THE FINAL SOLUTION GRAPH")
        print("---------------------------------------------------------------")
        print("SOLUTION GRAPH:", self.solutionGraph)
        print()


# Larger example graph (not used by the driver below; pairs with the H1 table)
nodeList1 = {
    'A': [('B', 1), ('C', 1), ('D', 1)],
    'B': [('G', 1), ('H', 1)],
    'C': [('J', 1)],
    'D': [('E', 1), ('F', 1)],
    'G': [('I', 1)]
}

nodeList = {
'A': [('B', 1), ('C', 1), ('D', 1)],
'B': [('G', 1), ('H', 1)],
'D': [('E', 1), ('F', 1)]
}

graph = Graph(nodeList)
graph.ao_star_algorithm('A')
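With every arc charged a unit cost, the revision that updateNode() applies is a sketch of the standard AO* update:

\[ h(v) \leftarrow \min_{S} \sum_{c \in S} \left(1 + h(c)\right) \]

where S ranges over v's successor groups (a single OR child, or an AND pair from getAndNodes()); v is marked solved once every node in its chosen group is solved.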

1. A* search algorithm

class Graph:
    def __init__(self, adjac_lis):
        self.adjac_lis = adjac_lis

    def get_neighbors(self, v):
        return self.adjac_lis[v]

    def h(self, n):
        # Heuristic estimate of the remaining cost from n to the goal
        H = {
            'A': 1,
            'B': 1,
            'C': 1,
            'D': 1
        }
        return H[n]

    def a_star_algorithm(self, start, stop):
        open_lst = set([start])
        closed_lst = set([])

        # g_cost[n] = cheapest known cost from start to n
        g_cost = {start: 0}
        # par[n] = predecessor of n on the cheapest known path
        par = {start: start}

        while len(open_lst) > 0:
            n = None
            # Pick the open node with the lowest f = g + h
            for v in open_lst:
                if n is None or g_cost[v] + self.h(v) < g_cost[n] + self.h(n):
                    n = v

            if n is None:
                print('Path does not exist!')
                return None

            if n == stop:
                # Walk the parent links back to the start to reconstruct the path
                reconst_path = []
                while par[n] != n:
                    reconst_path.append(n)
                    n = par[n]
                reconst_path.append(start)
                reconst_path.reverse()
                print('Path found: {}'.format(reconst_path))
                return reconst_path

            for (m, weight) in self.get_neighbors(n):
                if m not in open_lst and m not in closed_lst:
                    open_lst.add(m)
                    par[m] = n
                    g_cost[m] = g_cost[n] + weight
                else:
                    # Found a cheaper route to m: update it and reopen it if needed
                    if g_cost[m] > g_cost[n] + weight:
                        g_cost[m] = g_cost[n] + weight
                        par[m] = n

                        if m in closed_lst:
                            closed_lst.remove(m)
                            open_lst.add(m)

            open_lst.remove(n)
            closed_lst.add(n)

        print('Path does not exist!')
        return None
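Unlike the other programs, this one never builds a graph or calls the algorithm. A minimal driver with a made-up adjacency list that matches the four-node heuristic table might look like:

adjac_lis = {
    'A': [('B', 1), ('C', 3), ('D', 7)],
    'B': [('D', 5)],
    'C': [('D', 12)]
}
graph1 = Graph(adjac_lis)
graph1.a_star_algorithm('A', 'D')  # with this made-up graph it prints: Path found: ['A', 'B', 'D']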
     
 