
Commit

update contributors
Limmen committed Jul 11, 2024
1 parent fa2ece0 commit 3ae714f
Showing 5 changed files with 20 additions and 7 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -168,6 +168,7 @@ Thanks go to these people!
 </tr>
 <tr>
 <td align="center" valign="top" width="14.28%"><a href="https://github.com/kingxiaofire"><img src="https://github.com/kingxiaofire.png" width="100px;" alt="Yan Wang"/><br /><sub><b>Yan Wang</b></sub></a></td>
+<td align="center" valign="top" width="14.28%"><a href="https://github.com/Awsnaser"><img src="https://github.com/Awsnaser.png" width="100px;" alt="Aws Jaber"/><br /><sub><b>Aws Jaber</b></sub></a></td>
 </tr>
 </tbody>
 </table>
3 changes: 2 additions & 1 deletion docs/_docs/contributing.md
@@ -36,4 +36,5 @@ should be on the list but is not):
 - Nils Forsgren, software development.
 - Bength Roland Pappila, software development.
 - Yu Hu, software development.
-- Yan Wang, software development.
+- Yan Wang, software development.
+- Aws Jaber, software development.
17 changes: 12 additions & 5 deletions examples/eval/cyborg_scenario_two/eval_on_base_env.py
@@ -13,22 +13,29 @@
     maximum_steps=100, red_agent_distribution=[1.0], reduced_action_space=True, decoy_state=True,
     scanned_state=True, decoy_optimization=False, cache_visited_states=False)
 csle_cyborg_env = CyborgScenarioTwoDefender(config=config)
-num_evaluations = 10000
-max_horizon = 100
+num_evaluations = 1
+max_horizon = 20
 returns = []
 seed = 215125
 random.seed(seed)
 np.random.seed(seed)
 torch.manual_seed(seed)
-print("Starting policy evaluation")
+# print(csle_cyborg_env.action_id_to_type_and_host)
+# import sys
+# sys.exit(0)
+# print("Starting policy evaluation")
 for i in range(num_evaluations):
     o, _ = csle_cyborg_env.reset()
     R = 0
     t = 0
     while t < max_horizon:
-        a = ppo_policy.action(o=o)
+        # a = ppo_policy.action(o=o)
+        a = 4
         o, r, done, _, info = csle_cyborg_env.step(a)
+        table = csle_cyborg_env.get_true_table()
+        print(table)
+        print(r)
         R += r
         t += 1
     returns.append(R)
-    print(f"{i}/{num_evaluations}, avg R: {np.mean(returns)}, R: {R}")
+    # print(f"{i}/{num_evaluations}, avg R: {np.mean(returns)}, R: {R}")
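For context, the hunk above swaps the PPO policy rollout for a hardwired defender action (a = 4) and prints the environment's ground-truth table and reward after every step, which is a common way to inspect a single episode by hand. Below is a minimal, hedged sketch of that pattern; it only relies on the reset(), step() and get_true_table() calls visible in the diff, and the env argument stands in for the CyborgScenarioTwoDefender instance whose construction is only partially shown in the hunk.

# Hedged sketch of the fixed-action debugging rollout introduced in the hunk
# above. Only reset(), step(a) and get_true_table() from the diff are assumed;
# constructing the CyborgScenarioTwoDefender environment is not shown here
# because the hunk only contains the tail of its configuration.
import random

import numpy as np
import torch


def debug_rollout(env, fixed_action: int = 4, max_horizon: int = 20, seed: int = 215125) -> float:
    """Step the defender env with a fixed action and print ground truth each step."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    o, _ = env.reset()
    R = 0.0
    for _ in range(max_horizon):
        o, r, done, _, info = env.step(fixed_action)
        print(env.get_true_table())  # ground-truth host table of the scenario
        print(r)
        R += r
    return R

# Example usage (assuming csle_cyborg_env is the defender env from the script):
# total_return = debug_rollout(csle_cyborg_env)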
5 changes: 4 additions & 1 deletion examples/eval/cyborg_scenario_two/evaluate_on_wrapper_env.py
@@ -3,12 +3,14 @@
 import random
 from csle_common.metastore.metastore_facade import MetastoreFacade
 from gym_csle_cyborg.envs.cyborg_scenario_two_wrapper import CyborgScenarioTwoWrapper
+from gym_csle_cyborg.dao.red_agent_type import RedAgentType
 from gym_csle_cyborg.dao.csle_cyborg_wrapper_config import CSLECyborgWrapperConfig
 
 if __name__ == '__main__':
     ppo_policy = MetastoreFacade.get_ppo_policy(id=58)
     config = CSLECyborgWrapperConfig(maximum_steps=100, gym_env_name="",
-                                     save_trace=False, reward_shaping=False, scenario=2)
+                                     save_trace=False, reward_shaping=False, scenario=2,
+                                     red_agent_type=RedAgentType.B_LINE_AGENT)
     env = CyborgScenarioTwoWrapper(config=config)
     num_evaluations = 10000
     max_horizon = 100
@@ -25,6 +27,7 @@
         while t < max_horizon:
             a = ppo_policy.action(o=o)
             o, r, done, _, info = env.step(a)
+            env.show
             R += r
             t += 1
         returns.append(R)
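With this change the wrapper config names the red agent explicitly (RedAgentType.B_LINE_AGENT), and the rollout gains an env.show line. Below is a hedged sketch of what the evaluation script looks like after the commit, assembled from the pieces visible in the diff hunks above; the sketch omits the env.show debugging line and adds a progress print modeled on the base-env script, which is an assumption rather than part of this file.

# Hedged reconstruction, for illustration only, of evaluate_on_wrapper_env.py
# after this commit. Everything here is taken from the diff hunks above except
# the final progress print, which mirrors the base-env script and is an
# assumption.
import numpy as np
from csle_common.metastore.metastore_facade import MetastoreFacade
from gym_csle_cyborg.envs.cyborg_scenario_two_wrapper import CyborgScenarioTwoWrapper
from gym_csle_cyborg.dao.red_agent_type import RedAgentType
from gym_csle_cyborg.dao.csle_cyborg_wrapper_config import CSLECyborgWrapperConfig

if __name__ == '__main__':
    ppo_policy = MetastoreFacade.get_ppo_policy(id=58)  # stored defender policy, id from the diff
    config = CSLECyborgWrapperConfig(maximum_steps=100, gym_env_name="",
                                     save_trace=False, reward_shaping=False, scenario=2,
                                     red_agent_type=RedAgentType.B_LINE_AGENT)
    env = CyborgScenarioTwoWrapper(config=config)
    num_evaluations = 10000
    max_horizon = 100
    returns = []
    for i in range(num_evaluations):
        o, _ = env.reset()
        R = 0
        t = 0
        while t < max_horizon:
            a = ppo_policy.action(o=o)  # defender action from the PPO policy
            o, r, done, _, info = env.step(a)
            R += r
            t += 1
        returns.append(R)
        print(f"{i}/{num_evaluations}, avg R: {np.mean(returns)}")  # assumed progress print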
@@ -418,6 +418,7 @@ def test_get_snort_managers_info(
:param mock_get_ports:mock_get_ports
:param mock_get_ips: mock_get_ips
:return: None
"""
mock_get_ips.return_value = ["10.0.0.1", "10.0.0.2"]
