import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np

class Tokenizer:

    def __init__(self):
        self.dictionary = {}
        self.reverse_dictionary = {}

        # Add the padding token
        self.__add_to_dict('<pad>')

        # Add digits and letters to the dictionary
        for i in range(10):
            self.__add_to_dict(str(i))
        for i in range(26):
            self.__add_to_dict(chr(ord('a') + i))
            self.__add_to_dict(chr(ord('A') + i))

        # Add space and punctuation to the dictionary
        self.__add_to_dict('.')
        self.__add_to_dict(' ')
        symbols = ['!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '/',
                   ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|',
                   '}', '~', '’', '\xa0', '\n']
        for symbol in symbols:
            self.__add_to_dict(symbol)

    def __add_to_dict(self, character):
        if character not in self.dictionary:
            self.dictionary[character] = len(self.dictionary)
            self.reverse_dictionary[self.dictionary[character]] = character

    def tokenize(self, text):
        return [self.dictionary[c] for c in text]

    def character_to_token(self, character):
        return self.dictionary[character]

    def token_to_character(self, token):
        return self.reverse_dictionary[token]

    def size(self):
        return len(self.dictionary)
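
As a quick sanity check, tokenizing a string and mapping the tokens back should round-trip exactly (the specific ids just reflect the insertion order above):

tokenizer = Tokenizer()
tokens = tokenizer.tokenize("Fire safety 101.")
assert "".join(tokenizer.token_to_character(t) for t in tokens) == "Fire safety 101."
print(tokenizer.size(), tokens)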


# Define the environment

class CustomEnv:
    def __init__(self, data, tokenizer):
        self.data = data  # Your data containing questions, correct, and incorrect responses
        self.tokenizer = tokenizer
        self.current_index = 0
        self.max_steps = len(data)  # Assuming one episode per data entry

    def reset(self):
        # Initialize the environment at the beginning of an episode
        self.current_index = 0
        state = {
            "question": self.tokenizer.tokenize(self.data.iloc[self.current_index]['question']),
            "correct_response": self.tokenizer.tokenize(self.data.iloc[self.current_index]['correct_response']),
            "incorrect_response": self.tokenizer.tokenize(self.data.iloc[self.current_index]['incorrect_response']),
        }
        return state

    def step(self, action):
        # Take an action (e.g., select a response) and update the environment.
        # Calculate the reward based on the action and state, then return the
        # next state, the reward, and whether the episode is done.
        if self.current_index < self.max_steps - 1:
            self.current_index += 1
            next_state = {
                "question": self.tokenizer.tokenize(self.data.iloc[self.current_index]['question']),
                "correct_response": self.tokenizer.tokenize(self.data.iloc[self.current_index]['correct_response']),
                "incorrect_response": self.tokenizer.tokenize(self.data.iloc[self.current_index]['incorrect_response']),
            }
            reward = calculate_reward(action, next_state)  # Custom reward function, defined below
            done = False
        else:
            # End of episode
            next_state = None
            reward = 0
            done = True

        return next_state, reward, done
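
A self-contained sketch of the interaction this environment supports, using a tiny hypothetical in-memory dataset (the demo rows are made up for illustration). With a single row, the episode ends on the first step, so calculate_reward, defined further below, is never invoked here:

import pandas as pd

# Tiny made-up dataset so the example runs on its own
demo = pd.DataFrame({
    "question": ["What is fire safety."],
    "correct_response": ["Keep exits clear."],
    "incorrect_response": ["Ignore alarms."],
})
env_demo = CustomEnv(demo, Tokenizer())
state = env_demo.reset()
print(len(state["question"]))      # number of question tokens
next_state, reward, done = env_demo.step(0)
print(next_state, reward, done)    # None 0 True: a single-row episode ends at once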



!pip install --upgrade torch  # shell command: run once in the Colab/Jupyter environment

import torch
from torch.distributions import Categorical  # needed by select_action below

class PolicyNetwork(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(PolicyNetwork, self).__init__()
        self.state_dim = state_dim
        self.fc = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.ReLU(),
            nn.Linear(64, action_dim),
            nn.Softmax(dim=-1)
        )

    def forward(self, state):
        # The state is a dict of variable-length token lists, so encode the
        # question as a fixed-size bag-of-tokens count vector of length
        # state_dim (the tokenizer vocabulary size) before the linear layers.
        encoded = torch.zeros(self.state_dim, dtype=torch.float32)
        for token in state["question"]:
            encoded[token] += 1.0
        return self.fc(encoded)

    def select_action(self, state):
        # Forward pass through the policy network
        action_probs = self.forward(state)
        # Create a categorical distribution over the action probabilities
        dist = Categorical(action_probs)
        # Sample an action from the distribution
        action = dist.sample()
        # Return the selected action as an integer
        return action.item()
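
A small smoke test of the network under the bag-of-tokens encoding assumed in forward above; the output should be a two-element probability vector:

tok = Tokenizer()
net = PolicyNetwork(tok.size(), 2)
dummy_state = {"question": tok.tokenize("Is fire hot."),
               "correct_response": [], "incorrect_response": []}
probs = net(dummy_state)
print(probs, float(probs.sum()))       # two probabilities, summing to ~1
print(net.select_action(dummy_state))  # sampled action: 0 or 1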

def calculate_reward(action, state):
    # Custom reward logic. The policy emits an integer action index; by the
    # convention used here, action 0 means "pick the correct response" and
    # action 1 means "pick the incorrect response".
    # Reward +1 for selecting the correct response, -1 for the incorrect one.
    if action == 0:
        reward = 1
    elif action == 1:
        reward = -1
    else:
        reward = 0
    return reward
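
Under the 0 = correct / 1 = incorrect convention assumed above:

assert calculate_reward(0, state=None) == 1   # picked the correct response
assert calculate_reward(1, state=None) == -1  # picked the incorrect response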

from pandas_ods_reader import read_ods

data = read_ods("/content/drive/MyDrive/sample_data/Fire_security.ods",
                columns=["question", "correct_response", "incorrect_response"])

# Keep only the first three rows while prototyping
data = data.head(3)
data



# Initialize the environment and policy network
tokenizer = Tokenizer()
env = CustomEnv(data, tokenizer)
policy = PolicyNetwork(tokenizer.size(), 2)  # Adapt the input and output dimensions
optimizer = optim.Adam(policy.parameters(), lr=0.01)
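
The training loop below calls generate_response, which is never defined in this note. A minimal hypothetical stand-in (its behavior here is an assumption, not part of the original) keeps the loop runnable:

def generate_response(question_text):
    # Hypothetical placeholder: a real implementation would query a language
    # model; this stub simply echoes the question back.
    return question_text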







num_episodes = 10

# Training loop (REINFORCE-style policy gradient)
for episode in range(num_episodes):
    episode_states = []
    episode_actions = []
    episode_rewards = []

    state = env.reset()

    while True:
        # Extract the question from the state and decode it back to text
        question_text = state["question"]
        question_text = "".join([tokenizer.token_to_character(token) for token in question_text])
        # Generate a response based on the input question_text
        response_text = generate_response(question_text)  # Call your generate_response function here
        # Perform an action, get the reward, and transition to the next state
        action = policy.select_action(state)
        reward = calculate_reward(action, state)
        next_state, _, done = env.step(action)
        episode_states.append(state)
        episode_actions.append(action)
        episode_rewards.append(reward)
        state = next_state
        if done:
            break

    # Compute discounted returns
    discounted_returns = []
    running_add = 0
    for r in reversed(episode_rewards):
        running_add = r + 0.99 * running_add  # Discount factor: 0.99
        discounted_returns.insert(0, running_add)

    # Calculate the loss and perform the policy gradient update. Each stored
    # state is a dict, so recompute the action probabilities state by state
    # rather than concatenating tensors.
    action_probs = torch.stack([policy(s) for s in episode_states])
    selected_action_probs = torch.gather(action_probs, 1, torch.tensor(episode_actions).unsqueeze(1)).squeeze(1)
    loss = -torch.sum(torch.log(selected_action_probs) * torch.FloatTensor(discounted_returns))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print episode information
    if episode % 10 == 0:
        print(f"Episode {episode}, Total Reward: {np.sum(episode_rewards)}")

# # Use the trained policy for inference
# with torch.no_grad():
#     test_state = env.reset()
#     while True:
#         # Extract the question from the state
#         question_text = test_state["question"]
#         print(question_text)
#         # Generate a response based on the input question_text
#         generate_response(question_text)  # Call your generate_response function here
#         action_probs = policy(test_state)
#         action = int(np.argmax(action_probs.numpy()))
#         next_state, _, done = env.step(action)
#         test_state = next_state
#         if done:
#             break
Example of a tokenized state as produced by env.reset() (output truncated; the full response token lists run to several hundred entries):

state =

{'question': [56, 25, 11, 49, 64, 27, 47, 64, 21, 27, 45, 19, 64, 47, 11, 21, 19, 49, 59, 84],
 'correct_response': [22, 27, 45, 19, 64, 47, 11, 21, 19, 49, 59, 64, ...],
 'incorrect_response': [71, 45, 19, 64, 71, 45, 19, 64, ...]}