diff --git a/docs/games.md b/docs/games.md
index 6f0625c252..ef3720f104 100644
--- a/docs/games.md
+++ b/docs/games.md
@@ -75,6 +75,7 @@ Status | Game
 ~ | [Slovenian Tarok](#slovenian-tarok)
 ~ | [Skat (simplified bidding)](#skat-simplified-bidding)
 ~ | [Solitaire (K+)](#solitaire-k)
+~ | [Squadro](#squadro)
 ![](_static/green_circ10.png "green circle") | [Tic-Tac-Toe](#tic-tac-toe)
 ![](_static/green_circ10.png "green circle") | [Tiny Bridge](#tiny-bridge)
 ![](_static/green_circ10.png "green circle") | [Tiny Hanabi](#tiny-hanabi)
@@ -777,6 +778,16 @@ Status | Game
 * [Wikipedia](https://en.wikipedia.org/wiki/Klondike_\(solitaire\)) and
   [Bjarnason et al. '07, Searching solitaire in real time](http://web.engr.oregonstate.edu/~afern/papers/solitaire.pdf)
 
+### Squadro
+
+* Each turn, players move one of their pieces on the board.
+* Uses tokens on a grid.
+* Modern game.
+* Deterministic.
+* Perfect information.
+* 2 players.
+* [bgg](https://boardgamegeek.com/boardgame/245222/squadro)
+
 ### Tic-Tac-Toe
 
 * Players place tokens to try and form a pattern.
diff --git a/open_spiel/games/CMakeLists.txt b/open_spiel/games/CMakeLists.txt
index b0a3cb0018..8f536186ae 100644
--- a/open_spiel/games/CMakeLists.txt
+++ b/open_spiel/games/CMakeLists.txt
@@ -157,6 +157,8 @@ set(GAME_SOURCES
   skat.h
   solitaire.cc
   solitaire.h
+  squadro.cc
+  squadro.h
   stones_and_gems.cc
   stones_and_gems.h
   tarok.cc
@@ -560,6 +562,10 @@
 add_executable(solitaire_test solitaire_test.cc ${OPEN_SPIEL_OBJECTS}
                $<TARGET_OBJECTS:tests>)
 add_test(solitaire_test solitaire_test)
 
+add_executable(squadro_test squadro_test.cc ${OPEN_SPIEL_OBJECTS}
+               $<TARGET_OBJECTS:tests>)
+add_test(squadro_test squadro_test)
+
 add_executable(stones_and_gems_test stones_and_gems_test.cc ${OPEN_SPIEL_OBJECTS}
                $<TARGET_OBJECTS:tests>)
diff --git a/open_spiel/games/squadro.cc b/open_spiel/games/squadro.cc
new file mode 100644
index 0000000000..f42eaf3b50
--- /dev/null
+++ b/open_spiel/games/squadro.cc
@@ -0,0 +1,310 @@
+// Copyright 2019 DeepMind Technologies Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "open_spiel/games/squadro.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "open_spiel/utils/tensor_view.h"
+
+namespace open_spiel {
+namespace squadro {
+namespace {
+
+// Facts about the game
+const GameType kGameType{
+    /*short_name=*/"squadro",
+    /*long_name=*/"Squadro",
+    GameType::Dynamics::kSequential,
+    GameType::ChanceMode::kDeterministic,
+    GameType::Information::kPerfectInformation,
+    GameType::Utility::kZeroSum,
+    GameType::RewardModel::kTerminal,
+    /*max_num_players=*/2,
+    /*min_num_players=*/2,
+    /*provides_information_state_string=*/true,
+    /*provides_information_state_tensor=*/false,
+    /*provides_observation_string=*/true,
+    /*provides_observation_tensor=*/true,
+    /*parameter_specification=*/{}  // no parameters
+};
+
+std::shared_ptr<const Game> Factory(const GameParameters& params) {
+  return std::shared_ptr<const Game>(new SquadroGame(params));
+}
+
+REGISTER_SPIEL_GAME(kGameType, Factory);
+
+std::string PlayerToString(Player player) {
+  switch (player) {
+    case 0:
+      return "P0";
+    case 1:
+      return "P1";
+    default:
+      SpielFatalError(absl::StrCat("Invalid player id ", player));
+  }
+}
+
+std::string CellToString(
+    int row, int col,
+    const std::array<std::array<Position, kRows>, kNumPlayers>& board) {
+  if (board[0][col].position == row) {
+    if (board[0][col].direction == TokenState::forward) {
+      return "^";
+    } else if (board[0][col].direction == TokenState::backward) {
+      return "v";
+    }
+  }
+  if (board[1][row].position == col) {
+    if (board[1][row].direction == TokenState::forward) {
+      return ">";
+    } else if (board[1][row].direction == TokenState::backward) {
+      return "<";
+    }
+  }
+  return ".";
+}
+
+}  // namespace
+
+int SquadroState::CurrentPlayer() const {
+  if (IsTerminal()) {
+    return kTerminalPlayerId;
+  } else {
+    return current_player_;
+  }
+}
+
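+// Advances the moving token (in lane `move + 1`) past any opponent tokens
+// sitting directly in front of it, sending each jumped token back to the
+// start of its current pass (cell 0 on the way out, cell 6 on the way
+// back), and records the mover's final position. Returns true if at least
+// one opponent token was jumped.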
+bool SquadroState::OverpassOpponent(int opponent, int player_position,
+                                    Action move) {
+  bool overpassOpponent = false;
+
+  while (board_[opponent][player_position].position == move + 1) {
+    overpassOpponent = true;
+    // Send the jumped opponent token back to its default position.
+    if (board_[opponent][player_position].direction == TokenState::forward) {
+      board_[opponent][player_position].position = 0;
+    } else {
+      board_[opponent][player_position].position = 6;
+    }
+    player_position +=
+        board_[current_player_][move + 1].direction == TokenState::forward
+            ? 1
+            : -1;
+  }
+  board_[current_player_][move + 1].position = player_position;
+  return overpassOpponent;
+}
+
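+// Advances the selected token cell by cell, for the number of steps given
+// by its lane speed, jumping via OverpassOpponent as soon as an opponent
+// token is met. Also handles direction reversal at the far side of the
+// board, token removal back at the start, the four-tokens-removed win
+// condition, and the draw after 200 moves.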
+void SquadroState::DoApplyAction(Action move) {
+  int playerPosition = board_[current_player_][move + 1].position;
+  TokenState playerDirection = board_[current_player_][move + 1].direction;
+  int steps = playerDirection == TokenState::forward
+                  ? movements_[current_player_][move].forward
+                  : movements_[current_player_][move].backward;
+  int other_player = 1 - current_player_;
+  bool overpassOpponent = false;
+  int unit_move = playerDirection == TokenState::forward ? 1 : -1;
+  int finalPosition = playerPosition + steps;
+
+  do {
+    playerPosition += unit_move;
+    board_[current_player_][move + 1].position = playerPosition;
+    overpassOpponent = OverpassOpponent(other_player, playerPosition, move);
+  } while (playerPosition > 0 && playerPosition < kRows - 1 &&
+           unit_move * playerPosition < unit_move * finalPosition &&
+           !overpassOpponent);
+
+  if (board_[current_player_][move + 1].position == 0) {
+    // Token removed from the board.
+    board_[current_player_][move + 1].direction = TokenState::missing;
+    ++missing_tokens_[current_player_];
+  } else if (board_[current_player_][move + 1].position == 6) {
+    // Invert token direction when it reaches the end of the board.
+    board_[current_player_][move + 1].direction = TokenState::backward;
+  }
+
+  if (missing_tokens_[current_player_] == 4) {
+    outcome_ = current_player_ == 0 ? Outcome::kPlayer1 : Outcome::kPlayer2;
+  }
+
+  ++moves_made_;
+  if (moves_made_ >= 200) {
+    outcome_ = Outcome::kDraw;
+  }
+
+  current_player_ = other_player;
+}
+
+std::vector<Action> SquadroState::LegalActions() const {
+  std::vector<Action> moves;
+  if (IsTerminal()) return moves;
+  for (int pos = 0; pos < kNumActions; ++pos) {
+    if (board_[current_player_][pos + 1].direction != TokenState::missing)
+      moves.push_back(pos);
+  }
+  return moves;
+}
+
+std::string SquadroState::ActionToString(Player player,
+                                         Action action_id) const {
+  return absl::StrCat(PlayerToString(player), action_id);
+}
+
+SquadroState::SquadroState(std::shared_ptr<const Game> game) : State(game) {
+  for (int player = 0; player <= 1; ++player) {
+    for (int pos = 0; pos < kRows; ++pos) {
+      if (pos == 0 || pos == 6) {
+        // There are only 5 pieces per player. No pieces are present in the
+        // corners.
+        board_[player][pos] = {0, TokenState::missing};
+      } else {
+        board_[player][pos] = {0, TokenState::forward};
+      }
+    }
+  }
+}
+
+std::string SquadroState::ToString() const {
+  std::string str;
+  for (int row = kRows - 1; row >= 0; --row) {
+    for (int col = 0; col < kCols; ++col) {
+      str.append(CellToString(row, col, board_));
+    }
+    str.append("\n");
+  }
+  str.append("C");
+  int current_player = CurrentPlayer();
+  if (current_player == kTerminalPlayerId) {
+    str.append("2");
+  } else {
+    str.append(std::to_string(CurrentPlayer()));
+  }
+  return str;
+}
+
+bool SquadroState::IsTerminal() const { return outcome_ != Outcome::kUnknown; }
+
+std::vector<double> SquadroState::Returns() const {
+  if (outcome_ == Outcome::kPlayer1) return {1.0, -1.0};
+  if (outcome_ == Outcome::kPlayer2) return {-1.0, 1.0};
+  return {0.0, 0.0};
+}
+
+std::string SquadroState::InformationStateString(Player player) const {
+  SPIEL_CHECK_GE(player, 0);
+  SPIEL_CHECK_LT(player, num_players_);
+  return HistoryString();
+}
+
+std::string SquadroState::ObservationString(Player player) const {
+  SPIEL_CHECK_GE(player, 0);
+  SPIEL_CHECK_LT(player, num_players_);
+  return ToString();
+}
+
+int SquadroState::CellToInt(int row, int col) const {
+  std::string str = CellToString(row, col, board_);
+  return cell_state_map_.at(str);
+}
+
+void SquadroState::ObservationTensor(Player player,
+                                     absl::Span<float> values) const {
+  SPIEL_CHECK_GE(player, 0);
+  SPIEL_CHECK_LT(player, num_players_);
+
+  // One-hot encoding: one plane per cell state ('.', '^', '>', 'v', '<').
+  TensorView<2> view(values, {kCellStates, kRows * kCols}, true);
+  for (int row = kRows - 1; row >= 0; --row) {
+    for (int col = 0; col < kCols; ++col) {
+      int cell = row * kCols + col;
+      view[{CellToInt(row, col), cell}] = 1.0;
+    }
+  }
+}
+
+std::unique_ptr<State> SquadroState::Clone() const {
+  return std::unique_ptr<State>(new SquadroState(*this));
+}
+
+SquadroGame::SquadroGame(const GameParameters& params)
+    : Game(kGameType, params) {}
+
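+// Builds a state from the text format produced by ToString(): seven board
+// rows printed from top (row 6) to bottom (row 0) using '.', '^', 'v', '>'
+// and '<', followed by a final line "C<p>" where <p> is the player to move
+// ('2' denotes a terminal state).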
+SquadroState::SquadroState(std::shared_ptr<const Game> game,
+                           const std::string& str)
+    : State(game) {
+  for (int player = 0; player <= 1; ++player) {
+    for (int pos = 0; pos < kRows; ++pos) {
+      board_[player][pos] = {0, TokenState::missing};
+    }
+  }
+
+  int r = 6;
+  int c = 0;
+  for (const char ch : str) {
+    switch (ch) {
+      case '.':
+        break;
+      case '^':
+        board_[0][c].position = r;
+        board_[0][c].direction = TokenState::forward;
+        break;
+      case '>':
+        board_[1][r].position = c;
+        board_[1][r].direction = TokenState::forward;
+        break;
+      case 'v':
+        board_[0][c].position = r;
+        board_[0][c].direction = TokenState::backward;
+        break;
+      case '<':
+        board_[1][r].position = c;
+        board_[1][r].direction = TokenState::backward;
+        break;
+      case '0':
+        current_player_ = 0;
+        break;
+      case '1':
+        current_player_ = 1;
+        break;
+      case '2':
+        current_player_ = kTerminalPlayerId;
+        break;
+    }
+    if (ch == '.' || ch == '^' || ch == '>' || ch == 'v' || ch == '<' ||
+        ch == 'C') {
+      ++c;
+      if (c >= kCols) {
+        r--;
+        c = 0;
+      }
+    }
+  }
+  SPIEL_CHECK_TRUE(r == -1 && ("Problem parsing state (incorrect rows)."));
+  SPIEL_CHECK_TRUE(c == 1 &&
+                   ("Problem parsing state (column value should be 0)"));
+
+  int count_p0_tokens = 0;
+  int count_p1_tokens = 0;
+  for (int i = 0; i < kNumActions; ++i) {
+    count_p0_tokens += board_[0][i + 1].direction == TokenState::missing ? 0 : 1;
+    count_p1_tokens += board_[1][i + 1].direction == TokenState::missing ? 0 : 1;
+  }
+
+  if (count_p0_tokens == 1) {
+    outcome_ = Outcome::kPlayer1;
+  } else if (count_p1_tokens == 1) {
+    outcome_ = Outcome::kPlayer2;
+  }
+  SPIEL_CHECK_FALSE(count_p0_tokens == 1 && count_p1_tokens == 1 &&
+                    ("P1 and P2 cannot both have a single piece."));
+}
+
+}  // namespace squadro
+}  // namespace open_spiel
diff --git a/open_spiel/games/squadro.h b/open_spiel/games/squadro.h
new file mode 100644
index 0000000000..1ed38169c0
--- /dev/null
+++ b/open_spiel/games/squadro.h
@@ -0,0 +1,173 @@
+// Copyright 2019 DeepMind Technologies Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef OPEN_SPIEL_GAMES_SQUADRO_H_
+#define OPEN_SPIEL_GAMES_SQUADRO_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "open_spiel/spiel.h"
+
+// Implementation of the game Squadro.
+// Squadro is an abstract, perfect-information game for two players. Each
+// player controls 5 pieces placed on a 7x7 board. Players can only move
+// their pieces vertically or horizontally (the two players move
+// perpendicularly to each other), and each piece moves in a single
+// direction at a time (forward, then backward once it has crossed the
+// board). A player moves one piece per turn, and the length of the move is
+// indicated on the side of the board. Each piece has to reach the far end
+// of the board and come back to its starting cell; whenever a piece
+// returns to its starting cell, it is removed from the board. The first
+// player left with only 1 of their 5 pieces on the board wins. When a
+// piece jumps over an opponent's piece, it moves one additional step, and
+// the opponent's piece returns to the start of its row (not where it
+// started the game, but the beginning of the backward pass if it has
+// already completed the forward pass).
+//
+// Parameters: none
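+//
+// A minimal usage sketch (standard OpenSpiel core API; action ids 0-4
+// select one of the mover's five pieces):
+//
+//   std::shared_ptr<const Game> game = LoadGame("squadro");
+//   std::unique_ptr<State> state = game->NewInitialState();
+//   state->ApplyAction(2);  // Player 0 advances their middle piece.
+//   std::cout << state->ToString() << std::endl;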
+
+namespace open_spiel {
+namespace squadro {
+
+// Constants.
+inline constexpr int kNumPlayers = 2;
+inline constexpr int kRows = 7;
+inline constexpr int kCols = 7;
+inline constexpr int kNumCells = kRows * kCols;
+inline constexpr int kNumActions = 5;
+// Empty, player 0 (forward), player 1 (forward), player 0 (backward),
+// player 1 (backward).
+inline constexpr int kCellStates = 1 + kNumPlayers * 2;
+
+// Outcome of the game.
+enum class Outcome {
+  kPlayer1 = 0,  // Black
+  kPlayer2 = 1,  // White
+  kUnknown,
+  kDraw
+};
+
+// State of a cell.
+enum class CellState {
+  kEmpty,
+  kBlackForward,
+  kBlackBackward,
+  kWhiteForward,
+  kWhiteBackward,
+};
+
+// State of a token.
+enum class TokenState {
+  forward,
+  backward,
+  missing,
+};
+
+struct Position {
+  int position;  // between 0 and 6
+  TokenState direction;
+};
+
+struct Movement {
+  const int forward;
+  const int backward;
+};
+
+// State of an in-play game.
+class SquadroState : public State {
+ public:
+  SquadroState(std::shared_ptr<const Game> game);
+  explicit SquadroState(std::shared_ptr<const Game> game,
+                        const std::string& str);
+  SquadroState(const SquadroState& other) = default;
+
+  Player CurrentPlayer() const override;
+  std::vector<Action> LegalActions() const override;
+  std::string ActionToString(Player player, Action action_id) const override;
+  std::string ToString() const override;
+  bool IsTerminal() const override;
+  std::vector<double> Returns() const override;
+  std::string InformationStateString(Player player) const override;
+  std::string ObservationString(Player player) const override;
+  void ObservationTensor(Player player,
+                         absl::Span<float> values) const override;
+  std::unique_ptr<State> Clone() const override;
+  std::vector<Action> ActionsConsistentWithInformationFrom(
+      Action action) const override {
+    return {action};
+  }
+  std::unique_ptr<State> ResampleFromInfostate(
+      int player_id, std::function<double()> rng) const override {
+    return Clone();
+  }
+
+ protected:
+  void DoApplyAction(Action move) override;
+
+ private:
+  int CellToInt(int row, int col) const;
+  int moves_made_ = 0;
+  Player current_player_ = 0;  // Player zero goes first
+  Outcome outcome_ = Outcome::kUnknown;
+  std::array<std::array<Position, kRows>, kNumPlayers> board_;
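+  // Lane speeds, indexed by action id: `forward` steps are taken on the
+  // way out, `backward` steps (stored negated, since the position index
+  // decreases) on the way back. Each lane's two speeds sum to 4, and
+  // player 1's lanes mirror player 0's.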
+  std::array<std::array<Movement, kNumActions>, kNumPlayers> movements_{{
+      {{{3, -1}, {1, -3}, {2, -2}, {1, -3}, {3, -1}}},
+      {{{1, -3}, {3, -1}, {2, -2}, {3, -1}, {1, -3}}}
+  }};
+  std::map<std::string, int> cell_state_map_{
+      {".", 0}, {"^", 1}, {">", 2}, {"v", 3}, {"<", 4}};
+  std::array<int, kNumPlayers> missing_tokens_{0, 0};
+  bool OverpassOpponent(int opponent, int player_position, Action move);
+};
+
+// Game object.
+class SquadroGame : public Game {
+ public:
+  explicit SquadroGame(const GameParameters& params);
+  int NumDistinctActions() const override { return kNumActions; }
+  std::unique_ptr<State> NewInitialState() const override {
+    return std::unique_ptr<State>(new SquadroState(shared_from_this()));
+  }
+  int NumPlayers() const override { return kNumPlayers; }
+  double MinUtility() const override { return -1; }
+  double UtilitySum() const override { return 0; }
+  double MaxUtility() const override { return 1; }
+  std::vector<int> ObservationTensorShape() const override {
+    return {kCellStates, kRows, kCols};
+  }
+  // Arbitrarily chosen number to ensure the game is finite.
+  int MaxGameLength() const override { return 200; }
+};
+
+inline std::ostream& operator<<(std::ostream& stream, const CellState& state) {
+  switch (state) {
+    case CellState::kEmpty:
+      return stream << "Empty";
+    case CellState::kBlackForward:
+      return stream << "^";
+    case CellState::kBlackBackward:
+      return stream << "v";
+    case CellState::kWhiteForward:
+      return stream << ">";
+    case CellState::kWhiteBackward:
+      return stream << "<";
+    default:
+      SpielFatalError("Unknown cell state");
+  }
+}
+
+}  // namespace squadro
+}  // namespace open_spiel
+
+#endif  // OPEN_SPIEL_GAMES_SQUADRO_H_
diff --git a/open_spiel/games/squadro_test.cc b/open_spiel/games/squadro_test.cc
new file mode 100644
index 0000000000..e351e88a49
--- /dev/null
+++ b/open_spiel/games/squadro_test.cc
@@ -0,0 +1,175 @@
+// Copyright 2019 DeepMind Technologies Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "open_spiel/games/squadro.h"
+
+#include "open_spiel/spiel.h"
+#include "open_spiel/spiel_utils.h"
+#include "open_spiel/tests/basic_tests.h"
+
+namespace open_spiel {
+namespace squadro {
+namespace {
+
+namespace testing = open_spiel::testing;
+
+void BasicSquadroTests() {
+  testing::LoadGameTest("squadro");
+  testing::NoChanceOutcomesTest(*LoadGame("squadro"));
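+  // Additional smoke test: a few random playouts via the generic
+  // RandomSimTest helper from open_spiel/tests/basic_tests.h.
+  testing::RandomSimTest(*LoadGame("squadro"), /*num_sims=*/10);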
+}
+
+void InvertDirection() {
+  std::shared_ptr<const Game> game = LoadGame("squadro");
+  SquadroState state(game,
+                     "....v..\n"
+                     ">....v.\n"
+                     ".^^.<..\n"
+                     "....>..\n"
+                     "......<\n"
+                     "....>..\n"
+                     "...^...\n"
+                     "C0\n");
+
+  state.ApplyAction(0);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".v..v..\n"
+                 ">....v.\n"
+                 "..^.<..\n"
+                 "....>..\n"
+                 "......<\n"
+                 "....>..\n"
+                 "...^...\n"
+                 "C1");
+  state.ApplyAction(2);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".v..v..\n"
+                 ">....v.\n"
+                 "..^.<..\n"
+                 "......<\n"
+                 "......<\n"
+                 "....>..\n"
+                 "...^...\n"
+                 "C0");
+}
+
+void JumpOpponentTokens() {
+  std::shared_ptr<const Game> game = LoadGame("squadro");
+  SquadroState state(game,
+                     ".......\n"
+                     "....>v.\n"
+                     ".v^.<..\n"
+                     "....>..\n"
+                     "...>^..\n"
+                     "....>..\n"
+                     "...^...\n"
+                     "C1\n");
+  state.ApplyAction(4);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".....v.\n"
+                 "......<\n"
+                 ".v^.<..\n"
+                 "....>..\n"
+                 "...>^..\n"
+                 "....>..\n"
+                 "...^...\n"
+                 "C0");
+  state.ApplyAction(3);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".....v.\n"
+                 "....^.<\n"
+                 ".v^...<\n"
+                 ">......\n"
+                 "...>...\n"
+                 "....>..\n"
+                 "...^...\n"
+                 "C1");
+  state.ApplyAction(4);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".....v.\n"
+                 "...<...\n"
+                 ".v^...<\n"
+                 ">......\n"
+                 "...>...\n"
+                 "....>..\n"
+                 "...^^..\n"
+                 "C0");
+  state.ApplyAction(3);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".....v.\n"
+                 "...<...\n"
+                 ".v^...<\n"
+                 ">......\n"
+                 "...>^..\n"
+                 ">......\n"
+                 "...^...\n"
+                 "C1");
+  state.ApplyAction(3);
+  state.ApplyAction(2);
+  state.ApplyAction(3);
+  state.ApplyAction(4);
+  state.ApplyAction(3);
+  state.ApplyAction(4);
+  state.ApplyAction(3);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".v.....\n"
+                 "...<...\n"
+                 ".....v.\n"
+                 ">..^...\n"
+                 ">...^..\n"
+                 ">......\n"
+                 "..^....\n"
+                 "C0");
+}
+
+void CheckTerminal() {
+  std::shared_ptr<const Game> game = LoadGame("squadro");
+  SquadroState state(game,
+                     ".......\n"
+                     "....>..\n"
+                     "...^<..\n"
+                     "..<....\n"
+                     "......<\n"
+                     ".>...v.\n"
+                     ".......\n"
+                     "C0\n");
+  state.ApplyAction(3);
+  SPIEL_CHECK_EQ(state.ToString(),
+                 ".......\n"
+                 "....>..\n"
+                 "...^<..\n"
+                 "..<....\n"
+                 "......<\n"
+                 ".>.....\n"
+                 ".......\n"
+                 "C2");
+  SPIEL_CHECK_TRUE(state.IsTerminal());
+}
+
+void BasicSerializationTest() {
+  std::shared_ptr<const Game> game = LoadGame("squadro");
+  std::unique_ptr<State> state = game->NewInitialState();
+  std::unique_ptr<State> state2 = game->DeserializeState(state->Serialize());
+  SPIEL_CHECK_EQ(state->ToString(), state2->ToString());
+}
+
+}  // namespace
+}  // namespace squadro
+}  // namespace open_spiel
+
+int main(int argc, char **argv) {
+  open_spiel::squadro::BasicSquadroTests();
+  open_spiel::squadro::InvertDirection();
+  open_spiel::squadro::JumpOpponentTokens();
+  open_spiel::squadro::CheckTerminal();
+  open_spiel::squadro::BasicSerializationTest();
+}
\ No newline at end of file
diff --git a/open_spiel/integration_tests/playthroughs/squadro.txt b/open_spiel/integration_tests/playthroughs/squadro.txt
new file mode 100644
index 0000000000..adcfa87e50
--- /dev/null
+++ b/open_spiel/integration_tests/playthroughs/squadro.txt
@@ -0,0 +1,821 @@
+game: squadro
+
+GameType.chance_mode = ChanceMode.DETERMINISTIC
+GameType.dynamics = Dynamics.SEQUENTIAL
+GameType.information = Information.PERFECT_INFORMATION
+GameType.long_name = "Squadro"
+GameType.max_num_players = 2
+GameType.min_num_players = 2
+GameType.parameter_specification = []
+GameType.provides_information_state_string = True
+GameType.provides_information_state_tensor = False
+GameType.provides_observation_string = True
+GameType.provides_observation_tensor = True
+GameType.provides_factored_observation_string = False
+GameType.reward_model = RewardModel.TERMINAL
+GameType.short_name = "squadro"
+GameType.utility = Utility.ZERO_SUM
+
+NumDistinctActions() = 7
+PolicyTensorShape() = [7]
+MaxChanceOutcomes() = 0
+GetParameters() = {}
+NumPlayers() = 2
+MinUtility() = -1.0
+MaxUtility() = 1.0
+UtilitySum() = 0.0
+ObservationTensorShape() = [5, 7, 7]
+ObservationTensorLayout() = TensorLayout.CHW
+ObservationTensorSize() = 245
+MaxGameLength() = 200
+ToString() = "squadro()"
+
+# State 0
+# .......
+# >......
+# >......
+# >......
+# >......
+# >......
+# .^^^^^.
+# C0
+IsTerminal() = False
+History() = []
+HistoryString() = ""
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = ""
+InformationStateString(1) = ""
+ObservationString(0) = ".......\n>......\n>......\n>......\n>......\n>......\n.^^^^^.\nC0"
+ObservationString(1) = ".......\n>......\n>......\n>......\n>......\n>......\n.^^^^^.\nC0"
+ObservationTensor(0):
+◉◯◯◯◯◯◉ ◯◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◯◯◯◉ ◯◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P00", "P01", "P02", "P03", "P04"]
+
+# Apply action "P02"
+action: 2
+
+# State 1
+# .......
+# >......
+# >......
+# >......
+# >..^...
+# >......
+# .^^.^^.
+# C1
+IsTerminal() = False
+History() = [2]
+HistoryString() = "2"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2"
+InformationStateString(1) = "2"
+ObservationString(0) = ".......\n>......\n>......\n>......\n>..^...\n>......\n.^^.^^.\nC1"
+ObservationString(1) = ".......\n>......\n>......\n>......\n>..^...\n>......\n.^^.^^.\nC1"
+ObservationTensor(0):
+◉◯◯◉◯◯◉ ◯◉◉◯◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◯◉◉◉ ◯◯◯◉◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◉◯◯◉ ◯◉◉◯◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◯◉◉◉ ◯◯◯◉◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P11"
+action: 1
+
+# State 2
+# .......
+# >......
+# >......
+# >......
+# ....>..
+# >......
+# .^^^^^.
+# C0
+IsTerminal() = False
+History() = [2, 1]
+HistoryString() = "2, 1"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = "2, 1"
+InformationStateString(1) = "2, 1"
+ObservationString(0) = ".......\n>......\n>......\n>......\n....>..\n>......\n.^^^^^.\nC0"
+ObservationString(1) = ".......\n>......\n>......\n>......\n....>..\n>......\n.^^^^^.\nC0"
+ObservationTensor(0):
+◉◯◯◯◯◯◉ ◯◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◯◯◯◉ ◯◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P00", "P01", "P02", "P03", "P04"]
+
+# Apply action "P03"
+action: 3
+
+# State 3
+# .......
+# >......
+# >......
+# >......
+# ....>..
+# >...^..
+# .^^^.^.
+# C1
+IsTerminal() = False
+History() = [2, 1, 3]
+HistoryString() = "2, 1, 3"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2, 1, 3"
+InformationStateString(1) = "2, 1, 3"
+ObservationString(0) = ".......\n>......\n>......\n>......\n....>..\n>...^..\n.^^^.^.\nC1"
+ObservationString(1) = ".......\n>......\n>......\n>......\n....>..\n>...^..\n.^^^.^.\nC1"
+ObservationTensor(0):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P10"
+action: 0
+
+# State 4
+# .......
+# >......
+# >......
+# >......
+# ....>..
+# .>..^..
+# .^^^.^.
+# C0
+IsTerminal() = False
+History() = [2, 1, 3, 0]
+HistoryString() = "2, 1, 3, 0"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = "2, 1, 3, 0"
+InformationStateString(1) = "2, 1, 3, 0"
+ObservationString(0) = ".......\n>......\n>......\n>......\n....>..\n.>..^..\n.^^^.^.\nC0"
+ObservationString(1) = ".......\n>......\n>......\n>......\n....>..\n.>..^..\n.^^^.^.\nC0"
+ObservationTensor(0):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◯◉◉ ◯◯◯◯◉◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◯◉◉ ◯◯◯◯◉◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P00", "P01", "P02", "P03", "P04"]
+
+# Apply action "P03"
+action: 3
+
+# State 5
+# .......
+# >......
+# >......
+# >...^..
+# >......
+# .>.....
+# .^^^.^.
+# C1
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3]
+HistoryString() = "2, 1, 3, 0, 3"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2, 1, 3, 0, 3"
+InformationStateString(1) = "2, 1, 3, 0, 3"
+ObservationString(0) = ".......\n>......\n>......\n>...^..\n>......\n.>.....\n.^^^.^.\nC1"
+ObservationString(1) = ".......\n>......\n>......\n>...^..\n>......\n.>.....\n.^^^.^.\nC1"
+ObservationTensor(0):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◯◯◉◯◉ ◯◉◉◉◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P12"
+action: 2
+
+# State 6
+# Apply action "P03"
+action: 3
+
+# State 7
+# Apply action "P11"
+action: 1
+
+# State 8
+# Apply action "P03"
+action: 3
+
+# State 9
+# Apply action "P11"
+action: 1
+
+# State 10
+# Apply action "P01"
+action: 1
+
+# State 11
+# Apply action "P11"
+action: 1
+
+# State 12
+# Apply action "P02"
+action: 2
+
+# State 13
+# Apply action "P11"
+action: 1
+
+# State 14
+# Apply action "P02"
+action: 2
+
+# State 15
+# Apply action "P13"
+action: 3
+
+# State 16
+# Apply action "P01"
+action: 1
+
+# State 17
+# Apply action "P10"
+action: 0
+
+# State 18
+# Apply action "P04"
+action: 4
+
+# State 19
+# Apply action "P11"
+action: 1
+
+# State 20
+# .......
+# >...^..
+# ....>..
+# ..>..^.
+# ..^<...
+# ..>....
+# .^.^...
+# C0
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1"
+ObservationString(0) = ".......\n>...^..\n....>..\n..>..^.\n..^<...\n..>....\n.^.^...\nC0"
+ObservationString(1) = ".......\n>...^..\n....>..\n..>..^.\n..^<...\n..>....\n.^.^...\nC0"
+ObservationTensor(0):
+◉◯◉◯◉◉◉ ◯◉◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◯◉◉◉ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◉◯◉◉◯◉ ◯◯◯◯◯◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◉◯◉◉◉ ◯◉◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◯◉◉◉ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◉◯◉◉◯◉ ◯◯◯◯◯◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P00", "P01", "P02", "P03", "P04"]
+
+# Apply action "P02"
+action: 2
+
+# State 21
+# .......
+# >...^..
+# ....>..
+# ..>^.^.
+# ..^...<
+# ..>....
+# .^.....
+# C1
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2"
+ObservationString(0) = ".......\n>...^..\n....>..\n..>^.^.\n..^...<\n..>....\n.^.....\nC1"
+ObservationString(1) = ".......\n>...^..\n....>..\n..>^.^.\n..^...<\n..>....\n.^.....\nC1"
+ObservationTensor(0):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◯◯◉◯◉ ◯◯◯◉◯◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◯◯◉◯◉ ◯◯◯◉◯◉◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◯◉◉◉◯◉◉ ◯◯◯◯◉◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P11"
+action: 1
+
+# State 22
+# Apply action "P01"
+action: 1
+
+# State 23
+# Apply action "P13"
+action: 3
+
+# State 24
+# Apply action "P03"
+action: 3
+
+# State 25
+# Apply action "P12"
+action: 2
+
+# State 26
+# Apply action "P01"
+action: 1
+
+# State 27
+# Apply action "P12"
+action: 2
+
+# State 28
+# Apply action "P02"
+action: 2
+
+# State 29
+# Apply action "P11"
+action: 1
+
+# State 30
+# Apply action "P00"
+action: 0
+
+# State 31
+# Apply action "P10"
+action: 0
+
+# State 32
+# Apply action "P03"
+action: 3
+
+# State 33
+# Apply action "P12"
+action: 2
+
+# State 34
+# Apply action "P03"
+action: 3
+
+# State 35
+# Apply action "P11"
+action: 1
+
+# State 36
+# Apply action "P04"
+action: 4
+
+# State 37
+# Apply action "P10"
+action: 0
+
+# State 38
+# Apply action "P02"
+action: 2
+
+# State 39
+# Apply action "P10"
+action: 0
+
+# State 40
+# .....v.
+# >.^....
+# ...^..<
+# ..>....
+# .....<.
+# .....>.
+# .^.....
+# C0
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0"
+ObservationString(0) = ".....v.\n>.^....\n...^..<\n..>....\n.....<.\n.....>.\n.^.....\nC0"
+ObservationString(1) = ".....v.\n>.^....\n...^..<\n..>....\n.....<.\n.....>.\n.^.....\nC0"
+ObservationTensor(0):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◯ ◯◯◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◯◉◉◉◉ ◯◯◉◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◯ ◯◯◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◯◉◉◉◉ ◯◯◉◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 4]
+StringLegalActions() = ["P00", "P01", "P02", "P04"]
+
+# Apply action "P01"
+action: 1
+
+# State 41
+# ..v..v.
+# >......
+# ...^..<
+# ..>....
+# .....<.
+# .....>.
+# .^.....
+# C1
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1"
+ObservationString(0) = "..v..v.\n>......\n...^..<\n..>....\n.....<.\n.....>.\n.^.....\nC1"
+ObservationString(1) = "..v..v.\n>......\n...^..<\n..>....\n.....<.\n.....>.\n.^.....\nC1"
+ObservationTensor(0):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◯ ◯◯◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◉◯◯◉◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◯◉◉◉◉◉ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◯ ◯◯◯◉◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◯◉◉◯◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◉◯◯◉◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P12"
+action: 2
+
+# State 42
+# Apply action "P02"
+action: 2
+
+# State 43
+# Apply action "P13"
+action: 3
+
+# State 44
+# Apply action "P02"
+action: 2
+
+# State 45
+# Apply action "P13"
+action: 3
+
+# State 46
+# Apply action "P02"
+action: 2
+
+# State 47
+# Apply action "P13"
+action: 3
+
+# State 48
+# Apply action "P02"
+action: 2
+
+# State 49
+# Apply action "P14"
+action: 4
+
+# State 50
+# Apply action "P00"
+action: 0
+
+# State 51
+# Apply action "P13"
+action: 3
+
+# State 52
+# Apply action "P00"
+action: 0
+
+# State 53
+# Apply action "P14"
+action: 4
+
+# State 54
+# Apply action "P00"
+action: 0
+
+# State 55
+# Apply action "P10"
+action: 0
+
+# State 56
+# Apply action "P01"
+action: 1
+
+# State 57
+# Apply action "P11"
+action: 1
+
+# State 58
+# Apply action "P04"
+action: 4
+
+# State 59
+# Apply action "P11"
+action: 1
+
+# State 60
+# .......
+# >....v.
+# .v....<
+# ..v.>..
+# ...<...
+# ......<
+# .......
+# C0
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 0
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1"
+ObservationString(0) = ".......\n>....v.\n.v....<\n..v.>..\n...<...\n......<\n.......\nC0"
+ObservationString(1) = ".......\n>....v.\n.v....<\n..v.>..\n...<...\n......<\n.......\nC0"
+ObservationTensor(0):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◉◯◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◉◯◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯
+◉◯◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 4]
+StringLegalActions() = ["P00", "P01", "P04"]
+
+# Apply action "P00"
+action: 0
+
+# State 61
+# .......
+# >....v.
+# ......<
+# .vv.>..
+# ...<...
+# ......<
+# .......
+# C1
+IsTerminal() = False
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = 1
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0"
+ObservationString(0) = ".......\n>....v.\n......<\n.vv.>..\n...<...\n......<\n.......\nC1"
+ObservationString(1) = ".......\n>....v.\n......<\n.vv.>..\n...<...\n......<\n.......\nC1"
+ObservationTensor(0):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◯◯◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◉◉◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◯◯◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◉◉◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◉
+◯◉◉◉◉◯◉ ◯◯◯◯◯◯◯ ◉◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [0, 0]
+Returns() = [0, 0]
+LegalActions() = [0, 1, 2, 3, 4]
+StringLegalActions() = ["P10", "P11", "P12", "P13", "P14"]
+
+# Apply action "P14"
+action: 4
+
+# State 62
+# Apply action "P00"
+action: 0
+
+# State 63
+# Apply action "P10"
+action: 0
+
+# State 64
+# Apply action "P00"
+action: 0
+
+# State 65
+# Apply action "P14"
+action: 4
+
+# State 66
+# Apply action "P01"
+action: 1
+
+# State 67
+# Apply action "P11"
+action: 1
+
+# State 68
+# Apply action "P04"
+action: 4
+
+# State 69
+# Apply action "P11"
+action: 1
+
+# State 70
+# Apply action "P00"
+action: 0
+
+# State 71
+# .......
+# ..>....
+# .....v<
+# ....>..
+# .<.....
+# ...<...
+# .......
+# C2
+IsTerminal() = True
+History() = [2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0, 4, 0, 0, 0, 4, 1, 1, 4, 1, 0]
+HistoryString() = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0, 4, 0, 0, 0, 4, 1, 1, 4, 1, 0"
+IsChanceNode() = False
+IsSimultaneousNode() = False
+CurrentPlayer() = -4
+InformationStateString(0) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0, 4, 0, 0, 0, 4, 1, 1, 4, 1, 0"
+InformationStateString(1) = "2, 1, 3, 0, 3, 2, 3, 1, 3, 1, 1, 1, 2, 1, 2, 3, 1, 0, 4, 1, 2, 1, 1, 3, 3, 2, 1, 2, 2, 1, 0, 0, 3, 2, 3, 1, 4, 0, 2, 0, 1, 2, 2, 3, 2, 3, 2, 3, 2, 4, 0, 3, 0, 4, 0, 0, 1, 1, 4, 1, 0, 4, 0, 0, 0, 4, 1, 1, 4, 1, 0"
+ObservationString(0) = ".......\n..>....\n.....v<\n....>..\n.<.....\n...<...\n.......\nC2"
+ObservationString(1) = ".......\n..>....\n.....v<\n....>..\n.<.....\n...<...\n.......\nC2"
+ObservationTensor(0):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◯◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◉
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+ObservationTensor(1):
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◯◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◉◯◯◯
+◉◯◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◉◯◯◯◯◯
+◉◉◉◉◯◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◉◯ ◯◯◯◯◯◯◉
+◉◉◯◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◉◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+◉◉◉◉◉◉◉ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯ ◯◯◯◯◯◯◯
+Rewards() = [1, -1]
+Returns() = [1, -1]
diff --git a/open_spiel/python/tests/pyspiel_test.py b/open_spiel/python/tests/pyspiel_test.py
index 5749796688..4c5b936647 100644
--- a/open_spiel/python/tests/pyspiel_test.py
+++ b/open_spiel/python/tests/pyspiel_test.py
@@ -116,6 +116,7 @@
     "skat",
     "start_at",
     "solitaire",
+    "squadro",
     "stones_and_gems",
     "tarok",
     "tic_tac_toe",