"""
SECTION 1 : Load and set up data for training
"""

import csv
import random
import math
random.seed(123)  # fixed seed so the shuffle and weight initialization are reproducible

# Load the dataset
with open('iris-dataset.csv') as csvfile:
    csvreader = csv.reader(csvfile)
    # next(csvreader, None)  # skip the header row if the file has one
    dataset = list(csvreader)

# Convert string values to numeric
for row in dataset:
    row[4] = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"].index(row[4])  # map the class name to an integer
    row[:4] = [float(row[j]) for j in xrange(4)]  # features become floats, the class label stays an integer

# Shuffle, then split into train/test sets and into features (X) and targets (y)
random.shuffle(dataset)
datatrain = dataset[:int(len(dataset) * 0.8)]  # 80% of the data for training
datatest = dataset[int(len(dataset) * 0.8):]   # remaining 20% for testing
train_X = [data[:4] for data in datatrain]
train_y = [data[4] for data in datatrain]
test_X = [data[:4] for data in datatest]
test_y = [data[4] for data in datatest]
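
# A quick sanity check (illustrative, not part of the original script). With the standard
# 150-row Iris file assumed above, the 80/20 split gives 120 training and 30 test samples.
print 'train/test sizes =', len(train_X), '/', len(test_X)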

"""
SECTION 2 : Build and Train Model

Multilayer perceptron model, with one hidden layer.
input layer : 4 neuron, represents the feature of Iris
output layer : 3 neuron, represents the class of Iris

optimizer = gradient descent
loss function = Square Root Error
learning rate = 0.005
epoch = 400

best result = 96.67%
"""

def matrix_mul_bias(A, B, bias):  # matrix-matrix multiplication plus bias (used for testing)
    C = [[0 for i in xrange(len(B[0]))] for i in xrange(len(A))]
    for i in xrange(len(A)):
        for j in xrange(len(B[0])):
            for k in xrange(len(B)):
                C[i][j] += A[i][k] * B[k][j]
            C[i][j] += bias[j]
    return C

def vec_mat_bias(A, B, bias):  # vector-matrix multiplication plus bias (forward pass)
    C = [0 for i in xrange(len(B[0]))]
    for j in xrange(len(B[0])):
        for k in xrange(len(B)):
            C[j] += A[k] * B[k][j]
        C[j] += bias[j]
    return C


def mat_vec(A, B):  # matrix-vector multiplication (used for backprop in the MLP variant)
    C = [0 for i in xrange(len(A))]
    for i in xrange(len(A)):
        for j in xrange(len(B)):
            C[i] += A[i][j] * B[j]
    return C

def sigmoid(A, deriv=False):  # activation function; despite its name it applies tanh element-wise ('deriv' is unused)
    for i in xrange(len(A)):
        # A[i] = 1 / (1 + math.exp(-A[i]))  # logistic sigmoid (disabled)
        A[i] = (math.exp(A[i]) - math.exp(-A[i])) / (math.exp(A[i]) + math.exp(-A[i]))  # hyperbolic tangent
    return A
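
# A minimal numeric check (illustrative, not part of the original script) of the derivative
# identity used in backpropagation below: d/dh tanh(h) = 1 - tanh(h)**2 = (1 + tanh(h)) * (1 - tanh(h)).
def _tanh_deriv_check(h=0.7, eps=1e-6):
    t = math.tanh(h)
    numeric = (math.tanh(h + eps) - math.tanh(h - eps)) / (2 * eps)  # central finite difference
    analytic = (1 + t) * (1 - t)                                     # form used in the training loop
    return abs(numeric - analytic) < 1e-8

assert _tanh_deriv_check()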

# Define hyperparameters
alfa = 0.005  # learning rate
epoch = 500
# neuron = [4, 4, 3]  # layer sizes for the MLP variant
neuron = [4, 3]       # layer sizes for the single-layer perceptron

# Initialize weights and biases with zeros
weight = [[0 for j in xrange(neuron[1])] for i in xrange(neuron[0])]  # weights between the input and output layers
# weight_2 = [[0 for j in xrange(neuron[2])] for i in xrange(neuron[1])]  # MLP variant: weights between hidden and output layers
bias = [0 for i in xrange(neuron[1])]  # biases of the output layer
# bias_2 = [0 for i in xrange(neuron[2])]  # MLP variant: biases of the output layer

# Re-initialize the weights with random values in [-1.0, 1.0)
for i in xrange(neuron[0]):
    for j in xrange(neuron[1]):
        weight[i][j] = 2 * random.random() - 1  # random.random() is in [0, 1); scale and shift to [-1, 1)

''' MLP variant:
for i in xrange(neuron[1]):
    for j in xrange(neuron[2]):
        weight_2[i][j] = 2 * random.random() - 1
'''


for e in xrange(epoch):
    cost_total = 0
    for idx, x in enumerate(train_X):  # update on every sample: stochastic gradient descent

        # Forward propagation
        h_1 = vec_mat_bias(x, weight, bias)
        X_1 = sigmoid(h_1)
        # h_2 = vec_mat_bias(X_1, weight_2, bias_2)  # MLP variant
        # X_2 = sigmoid(h_2)                         # MLP variant: network output

        # Convert the target class to a one-hot vector
        target = [0, 0, 0]
        target[int(train_y[idx])] = 1

        # Cost function: sum of squared errors (SSE)
        eror = 0
        for i in xrange(3):
            # eror += 0.5 * (target[i] - X_2[i]) ** 2  # half-SSE, MLP variant
            # eror += (target[i] - X_2[i]) ** 2        # SSE, MLP variant
            eror += (target[i] - X_1[i]) ** 2          # SSE, SLP
        cost_total += eror

        # Backward propagation
        # Update weight_2 and bias_2 (layer 2, MLP variant)
        '''
        delta_2 = []
        for j in xrange(neuron[2]):
            delta_2.append(-1 * (target[j] - X_2[j]) * (1 + X_2[j]) * (1 - X_2[j]))  # tanh derivative

        for i in xrange(neuron[1]):
            for j in xrange(neuron[2]):
                weight_2[i][j] -= alfa * (delta_2[j] * X_1[i])
                bias_2[j] -= alfa * delta_2[j]
        '''
        # Update weight and bias (layer 1)
        # delta_1 = mat_vec(weight_2, delta_2)  # MLP variant: backpropagate the error through weight_2
        delta_1 = []
        for j in xrange(neuron[1]):
            # delta_1[j] = delta_1[j] * ((1 + X_1[j]) * (1 - X_1[j]))  # MLP variant: tanh derivative
            delta_1.append(-1 * (target[j] - X_1[j]) * (1 + X_1[j]) * (1 - X_1[j]))  # tanh derivative

        for i in xrange(neuron[0]):
            for j in xrange(neuron[1]):
                weight[i][j] -= alfa * (delta_1[j] * x[i])
                bias[j] -= alfa * delta_1[j]

    cost_total /= len(train_X)
    if e % 40 == 0:
        print cost_total  # print the average cost to monitor training

"""
SECTION 3 : Testing
"""

res = matrix_mul_bias(test_X, weight, bias)  # raw (pre-activation) outputs for every test sample
# res_2 = matrix_mul_bias(res, weight_2, bias_2)  # MLP variant

# Get predictions: the index of the largest output of each sample
preds = []
for r in res:
    preds.append(max(enumerate(r), key=lambda x: x[1])[0])

# Print predictions
print 'Output =', preds

# Print targets
print 'Target =', [int(y) for y in test_y]

# Calculate accuracy
acc = 0.0
for i in xrange(len(preds)):
    if preds[i] == int(test_y[i]):
        acc += 1
print acc / len(preds) * 100, "%"
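
# Note (an illustrative check, not part of the original script): the test pass above uses the
# raw outputs without the tanh activation. Because tanh is strictly increasing, the arg-max is
# unchanged, so the predicted classes are the same either way.
activated = [sigmoid(list(r)) for r in res]  # activate a copy of each output row
preds_activated = [max(enumerate(r), key=lambda x: x[1])[0] for r in activated]
print 'argmax unchanged by activation:', preds_activated == preds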