Commit
Merge pull request #134 from VishwamAI/devin/cognitive-architecture-tests

Fix shape mismatch in HOT model
Showing 19 changed files with 1,364 additions and 57 deletions.
@@ -0,0 +1,127 @@
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import random

class BrowsingResearcherAgent:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=10000)  # experience replay buffer
        self.gamma = 0.95  # discount rate
        self.epsilon = 1.0  # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.model = self._build_model()
        self.causal_model = self._build_causal_model()
        self.meta_model = self._build_meta_model()
        self.hierarchical_model = self._build_hierarchical_model()
        # A single optimizer over the parameters of all four models, so the
        # auxiliary losses in replay() also update their weights.
        self.optimizer = optim.Adam(
            list(self.model.parameters())
            + list(self.causal_model.parameters())
            + list(self.meta_model.parameters())
            + list(self.hierarchical_model.parameters()),
            lr=self.learning_rate,
        )

    def _build_model(self):
        # Small MLP mapping state features to one Q-value per action.
        model = nn.Sequential(
            nn.Linear(self.state_size, 24),
            nn.ReLU(),
            nn.Linear(24, 24),
            nn.ReLU(),
            nn.Linear(24, self.action_size)
        )
        return model

    def _build_causal_model(self):
        # Placeholder causal-reasoning head: a single linear layer.
        return nn.Linear(self.state_size, self.action_size)

    def _build_meta_model(self):
        # Placeholder meta-cognition head: a single linear layer.
        return nn.Linear(self.state_size, self.action_size)

    def _build_hierarchical_model(self):
        # Three linear heads, one per hierarchy level; their outputs are summed.
        return nn.ModuleList([nn.Linear(self.state_size, self.action_size) for _ in range(3)])

    def remember(self, state, action, reward, next_state, done):
        # Store a transition for later experience replay.
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        # Epsilon-greedy: explore with probability epsilon, otherwise pick the
        # action with the highest combined value across all four models.
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        state = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            act_values = self.model(state)
            causal_values = self.causal_model(state)
            meta_values = self.meta_model(state)
            hierarchical_values = sum(model(state) for model in self.hierarchical_model)
            combined_values = act_values + causal_values + meta_values + hierarchical_values
        return int(np.argmax(combined_values.numpy()))

    def replay(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            # One-step Q-learning target: r + gamma * max_a' Q(s', a').
            target = reward
            if not done:
                next_state = torch.FloatTensor(next_state).unsqueeze(0)
                with torch.no_grad():
                    target = reward + self.gamma * self.model(next_state).max().item()
            state = torch.FloatTensor(state).unsqueeze(0)
            # Detach before the in-place write so the assignment does not
            # corrupt the autograd graph of the forward pass.
            target_f = self.model(state).detach().clone()
            target_f[0][action] = target
            loss = nn.MSELoss()(self.model(state), target_f)
            causal_loss = nn.MSELoss()(self.causal_model(state), target_f)
            meta_loss = nn.MSELoss()(self.meta_model(state), target_f)
            hierarchical_loss = sum(nn.MSELoss()(model(state), target_f)
                                    for model in self.hierarchical_model)
            total_loss = loss + causal_loss + meta_loss + hierarchical_loss
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

class BrowsingEnvironment:
    def __init__(self):
        self.current_page = None
        self.visited_pages = set()

    def reset(self):
        self.current_page = "start_page"
        self.visited_pages = set()
        return self._get_state()

    def step(self, action):
        next_page = self._simulate_browsing(action)
        reward = self._calculate_reward(next_page)
        self.current_page = next_page
        self.visited_pages.add(next_page)
        done = self._is_done()
        return self._get_state(), reward, done, {}

    def _simulate_browsing(self, action):
        # Toy transition: each action deterministically leads to one page.
        return f"page_{action}"

    def _calculate_reward(self, page):
        # Reward 1 for discovering a new page, 0 for revisiting one.
        return 1 if page not in self.visited_pages else 0

    def _is_done(self):
        # Episode ends once 10 distinct pages have been visited.
        return len(self.visited_pages) >= 10

    def _get_state(self):
        # Crude one-dimensional state: a hash bucket of the current page.
        return [hash(self.current_page) % 100]

def train_agent(episodes, batch_size=32):
    env = BrowsingEnvironment()
    agent = BrowsingResearcherAgent(state_size=1, action_size=10)

    for e in range(episodes):
        state = env.reset()
        for time in range(500):
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                print(f"episode: {e}/{episodes}, score: {time}, e: {agent.epsilon:.2}")
                break
            if len(agent.memory) > batch_size:
                agent.replay(batch_size)


if __name__ == "__main__":
    train_agent(episodes=100)
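
After training, the agent can be run greedily by zeroing its exploration rate, so act() always returns the argmax of the combined model outputs. A minimal sketch, assuming the classes above are in scope; evaluate_agent is a hypothetical helper and not part of this commit:

# Hypothetical helper (not part of the commit): run a trained agent greedily.
def evaluate_agent(agent, env, max_steps=500):
    agent.epsilon = 0.0  # disable exploration; always take the argmax action
    state = env.reset()
    total_reward = 0
    for _ in range(max_steps):
        action = agent.act(state)
        state, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward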