diff --git a/16x16-overworld.png b/16x16-overworld.png
new file mode 100644
index 0000000..46075fe
Binary files /dev/null and b/16x16-overworld.png differ
diff --git a/README b/README
index ac6a3e4..5712149 100644
--- a/README
+++ b/README
@@ -1,32 +1,55 @@
-pyGOAP v.2
-
-My implimentation of GOAP AI.
-
-The main concept of GOAP is that AI is not a static table
-or set of states. Rather, GOAP uses a A* like search to
-find solutions to goals at runtime. This frees the
-designer of setting up complex behavior trees. This
-python implementation uses a simple text file and a python
-script to define actions for a GOAP agent. You can have
-a look in the npc folder and pirate.py.
-
-There is a test called pirate.py. It more-or-less test/demos
-the GOAP library.
-
-You should be able to run the pirate demo and watch how he
-satisfies his two goals: getting drunk and laid. =)
-You'll notice that he dances a lot. Thats because the dance
-action gives him 25 money, and he needs 100 to woo a lady.
-
-The source has a ton of comments, but most of them are just
-notes that I take so I don't forget stuff, since I don't
-always have time to work on this, and forget some of the
-details and ideas that I have for the future. That said,
-some of the comments are not truen, and you shouldn't trust
-them. =)
-
-pygoap is under a rather large rewrite as I prepare it for
-usage in large environments. This includes an environment
-modeler and pathfinding, among other things.
-
-Hope you have fun.
+for goap 3
+
+planner
+
+planner should be able to reasonably predict the cost of repeating actions
+
+agents:
+in an rpg game, the player can expect to talk to NPCs and to gather
+information about quests, items, other npc's, etc.
+
+i think that goap can become an engine of sorts, dictating every action in the
+game. basic play elements can be emulated through goap and would not have to
+be explicitly coded into the game. take simple "hello" meet/greet actions with
+an npc:
+    npc will have an insatiable desire to talk to a player
+ they will have a different thing to say depending on mood/state of player
+
+a side effect of this will be that npc's could possibly become more lifelike as
+they can move around the game world to satisfy goals. to moderate and control
+the npc's and to make the game more enjoyable, npc's can have goals that are
+only relevant at certain times of day or days of the week.
+
+in the harvest moon series for example, the npcs do have certain schedules that
+they will loosely follow. this makes the game play predictable once their
+simple schedule is learned. imo, giving the npc's too much freedom to act will
+make the game world seem more random, and potentially frustrating to play.
+
+tying the speech system to goap agents could make the gameplay more immersive
+by allowing the player to ask unscripted questions. while developing a system
+that perfectly synthesizes english is not a viable option, giving the player
+the option to ask canned questions, unrelated to the quest or story, with the
+ability to choose specific parts of the question is a definite cool thing.
+
+for example, the player might ask an npc "have you seen gary?". the goap
+agent can then search its memories of gary and give a response to the player.
+because this would be based on the agent's memory, and not some canned
+response, it will simultaneously make the game more immersive and relieve the
+game designers and writers of the burden of creating dialog.
+
+a frustrating aspect of some games is dealing with faction alliances. for
+example, in some games, killing or doing some negative action against a member
+of a faction will cause all members of that faction to instantly become hostile
+toward the player. this is unrealistic in situations where the information
+about your hostility could not have reached the other party.
+
+the spread of information could also be simulated in goap by
+creating goals that one agent wants to tell other agents things that he saw the
+player do. for example, there may be neutral npc's that will gossip with other
+npc's in distant towns. or, members of one faction may use radio, phones,
+letters, messengers, etc to tell other faction members about the player.
+
+this too could be emulated with goap, and would not have to be explicitly
+programmed.
+
+
diff --git a/TODO b/TODO
deleted file mode 100644
index 0d6e37b..0000000
--- a/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-GUI testbed
-Locomotion
\ No newline at end of file
diff --git a/formosa.tmx b/formosa.tmx
new file mode 100644
index 0000000..c484b55
--- /dev/null
+++ b/formosa.tmx
@@ -0,0 +1,16 @@
+
+
diff --git a/npc/pirate/.actions.py.swp b/npc/pirate/.actions.py.swp
new file mode 100644
index 0000000..46e0cd7
Binary files /dev/null and b/npc/pirate/.actions.py.swp differ
diff --git a/npc/pirate/actions.csv b/npc/pirate/actions.csv
deleted file mode 100644
index b9b6b94..0000000
--- a/npc/pirate/actions.csv
+++ /dev/null
@@ -1,15 +0,0 @@
-idle; ; idle
-do_dance; ; money = money + 25
-drink_rum; has_rum; is_drunk, charisma = charisma + 10
-buy_rum; money >= 25; has_rum, money = money - 25
-steal_rum; is_evil; has_rum
-steal_money; is_evil; money = money + 25
-sell_loot; has_loot; money = money + 25
-get_loot; is_sailing; has_loot
-go_sailing; has_boat; is_sailing
-beg_money; is_weak; money = money + 25
-buy_boat; money >= 25; has_boat, money = money - 25
-woo_lady; money >= 100, sees_lady, charisma >= 5; has_lady, money = money - 100
-get_laid; has_lady, is_drunk; is_laid
-make_rum; has_sugar; has_rum
-pilage_lady; is_evil, is_drunk, sees_lady; is_laid
diff --git a/npc/pirate/actions.py b/npc/pirate/actions.py
index d7ae60b..b71d52a 100644
--- a/npc/pirate/actions.py
+++ b/npc/pirate/actions.py
@@ -1,5 +1,71 @@
-from pygoap import CallableAction, CalledOnceAction, ACTIONSTATE_FINISHED
+"""
+This is an example module for programming actions for a pyGOAP agent.
+The module must contain a list called "exported_actions". This list should
+contain any classes that you would like to add to the planner.
+
+To make it convienent, I have chosen to add the class to the list after each
+declaration, although you may choose another way.
+"""
+
+from pygoap.actions import *
+from pygoap.goals import *
+
+
+DEBUG = 0
+
+def get_position(thing, bb):
+ """
+ get the position of the caller according to the blackboard
+ """
+
+ pos = None
+ a = []
+
+ tags = bb.read("position")
+ tags.reverse()
+ for tag in tags:
+ if tag['obj'] == thing:
+ pos = tag['position']
+ a.append(tag)
+ break
+
+ if pos == None:
+ raise Exception, "function cannot find position"
+
+ return pos
+
+exported_actions = []
+
+class move(ActionBuilder):
+ """
+ you MUST have a mechanism that depletes a counter when moving, otherwise
+ the planner will loop lessly moving the agent around to different places.
+ """
+
+ def get_actions(self, caller, bb):
+ """
+ return a list of action that this caller is able to move with
+ """
+
+ if not SimpleGoal(is_tired=True).test(bb):
+ pos = caller.environment.can_move_from(caller, dist=30)
+ return [ self.build_action(caller, p) for p in pos ]
+ else:
+ return []
+
+ def build_action(self, caller, pos):
+ a = move_action(caller)
+ a.effects.append(PositionGoal(target=caller, position=pos))
+ a.effects.append(SimpleGoal(is_tired=True))
+ return a
+
+
+
+class move_action(CallableAction):
+ pass
+
+exported_actions.append(move)
class look(CalledOnceAction):
@@ -7,74 +73,181 @@ def start(self):
self.caller.environment.look(self.caller)
super(look, self).start()
-class pickup_object(CalledOnceAction):
- def start(self):
- print "pickup"
+#exported_actions.append(look)
-class drink_rum(CallableAction):
+class pickup(ActionBuilder):
+ def get_actions(self, caller, bb):
+ """
+ return list of actions that will pickup an item at caller's position
+ """
+
+ caller_pos = get_position(caller, bb)
+
+ a = []
+ for tag in bb.read("position"):
+ if caller_pos == tag['position']:
+ if not tag['obj'] == caller:
+ if DEBUG: print "[pickup] add {}".format(tag['obj'])
+ a.append(self.build_action(caller, tag['obj']))
+
+ return a
+
+ def build_action(self, caller, item):
+ a = pickup_action(caller)
+ a.effects.append(HasItemGoal(caller, item))
+ return a
+
+class pickup_action(CalledOnceAction):
+ """
+ take an object from the environment and place it into your inventory
+ """
+ pass
+
+exported_actions.append(pickup)
+
+
+
+class drink_rum(ActionBuilder):
+ """
+ drink rum that is in caller's inventory
+ """
+
+ def make_action(self, caller, tag, bb):
+ a = drink_rum_action(caller)
+ a.effects.append(SimpleGoal(is_drunk=True))
+ a.effects.append(EvalGoal("charisma = charisma + 10"))
+
+ return a
+
+ def get_actions(self, caller, bb):
+ """
+ return list of actions that will drink rum from player's inv
+ """
+
+ a = []
+
+ for tag in bb.read("position"):
+ if tag['position'][0] == caller:
+ if DEBUG: print "[drink rum] 1 {}".format(tag)
+ if tag['obj'].name=="rum":
+ if DEBUG: print "[drink rum] 2 {}".format(tag)
+ a.append(self.make_action(caller, tag, bb))
+
+ return a
+
+
+class drink_rum_action(CallableAction):
def start(self):
self.caller.drunkness = 1
super(drink_rum, self).start()
- print self.caller, "is drinking some rum"
def update(self, time):
if self.valid():
- print self.caller, "is still drinking..."
self.caller.drunkness += 1
if self.caller.drunkness == 5:
self.finish()
else:
- self.fail()
+ self.fail()
def finish(self):
- print self.caller, "says \"give me more #$*@$#@ rum!\""
super(drink_rum, self).finish()
-class idle(CallableAction):
- def ok_finish(self):
- return True
+exported_actions.append(drink_rum)
+
+
+
+class idle(ActionBuilder):
+ def get_actions(self, caller, bb):
+ a = idle_action(caller)
+ a.effects = [SimpleGoal(is_idle=True)]
+ return [a]
+
+class idle_action(CalledOnceAction):
+ builder = idle
+
+exported_actions.append(idle)
- def finish(self):
- self.state = ACTIONSTATE_FINISHED
- print self.caller, "finished idling"
- #CallableAction.finish(self)
- #del self.caller.blackboard.tagDB['idle']
class buy_rum(CalledOnceAction):
- pass
+ def setup(self):
+ self.prereqs.append(NeverValidGoal())
+
+#exported_actions.append(buy_rum)
+
+
class steal_rum(CalledOnceAction):
- pass
+ def setup(self):
+ self.effects.append(HasItemGoal(name="rum"))
+
+#exported_actions.append(steal_rum)
+
+
class steal_money(CalledOnceAction):
- pass
+ def setup(self):
+ self.effects.append(EvalGoal("money = money + 25"))
+
+#exported_actions.append(steal_money)
+
+
class sell_loot(CalledOnceAction):
- pass
+ def setup(self):
+ self.prereqs.append(HasItemGoal(name="loot"))
+ self.effects.append(EvalGoal("money = money + 100"))
+
+#exported_actions.append(sell_loot)
+
+
class get_loot(CalledOnceAction):
- pass
+ def setup(self):
+ self.effects.append(HasItemGoal(name="loot"))
+
+#exported_actions.append(get_loot)
+
+
class go_sailing(CalledOnceAction):
pass
+#exported_actions.append(go_sailing)
+
+
+
class beg_money(CalledOnceAction):
- pass
+ def setup(self):
+ self.effects.append(EvalGoal("money = money + 5"))
+
+#exported_actions.append(beg_money)
+
-class buy_boat(CalledOnceAction):
- pass
class woo_lady(CalledOnceAction):
- pass
+ def setup(self):
+ self.prereqs = [
+ EvalGoal("money >= 100"),
+ EvalGoal("charisma >= 5"),
+ PositionGoal(max_dist=3, name="wench")]
+
+ self.effects = [
+ EvalGoal("money = money - 100")]
+
+#exported_actions.append(woo_lady)
+
+
class get_laid(CalledOnceAction):
pass
-class pilage_lady(CalledOnceAction):
- pass
+#exported_actions.append(get_laid)
+
-class make_rum(CalledOnceAction):
- pass
class do_dance(CalledOnceAction):
- pass
+ def setup(self):
+ self.effects.append(EvalGoal("money = money + 25"))
+
+#exported_actions.append(do_dance)
+
diff --git a/npc/pirate/actions.pyc b/npc/pirate/actions.pyc
new file mode 100644
index 0000000..0b449ad
Binary files /dev/null and b/npc/pirate/actions.pyc differ
diff --git a/npc/pirate/precept.csv b/npc/pirate/precept.csv
deleted file mode 100644
index e69de29..0000000
diff --git a/pirate.py b/pirate.py
index ed55536..d64d65a 100644
--- a/pirate.py
+++ b/pirate.py
@@ -15,147 +15,153 @@
along with this program. If not, see .
"""
-__version__ = ".004"
-
-import csv, sys, os, imp
-
-try:
- import psyco
-except ImportError:
- pass
-
-from pygoap import *
-
-
"""
lets make a drunk pirate.
scenerio:
the pirate begins by idling
- after 5 seconds of the simulation, he sees a girl
+ soon....he spyies a woman
he should attempt to get drunk and sleep with her...
...any way he knows how.
"""
-global_actions = {}
-
-def load_commands(agent, path):
- def is_expression(string):
- if "=" in string:
- return True
- else:
- return False
-
- def parse_line(p, e):
- prereqs = []
- effects = []
-
- if p == "":
- prereqs = None
- else:
- for x in p.split(","):
- x = x.strip()
- if is_expression(x):
- p2 = ExtendedActionPrereq(x)
- else:
- p2 = BasicActionPrereq(x)
+__version__ = ".008"
- prereqs.append(p2)
+from pygoap.agent import GoapAgent
+from pygoap.environment import ObjectBase
+from pygoap.tiledenvironment import TiledEnvironment
+from pygoap.goals import *
+import os, imp, sys
- for x in e.split(","):
- x = x.strip()
- if is_expression(x):
- e2 = ExtendedActionEffect(x)
- else:
- e2 = BasicActionEffect(x)
+from pygame.locals import *
- effects.append(e2)
- return prereqs, effects
+stdout = sys.stdout
+global_actions = {}
- # more hackery
+def load_commands(agent, path):
mod = imp.load_source("actions", os.path.join(path, "actions.py"))
+ global_actions = dict([ (c.__name__, c()) for c in mod.exported_actions ])
- csvfile = open(os.path.join(path, "actions.csv"))
- sample = csvfile.read(1024)
- dialect = csv.Sniffer().sniff(sample)
- has_header = csv.Sniffer().has_header(sample)
- csvfile.seek(0)
-
- r = csv.reader(csvfile, delimiter=';')
-
- for n, p, e in r:
- prereqs, effects = parse_line(p, e)
- action = SimpleActionNode(n, prereqs, effects)
- action.set_action_class(mod.__dict__[n])
- agent.add_action(action)
+ #for k, v in global_actions.items():
+ # print "testing action {}..."
+ # v.self_test()
- global_actions[n] = action
+ [ agent.add_action(a) for a in global_actions.values() ]
def is_female(precept):
try:
thing = precept.thing
except AttributeError:
- pass
+ return False
else:
if isinstance(thing, Human):
return thing.gender == "Female"
+
class Human(GoapAgent):
- def __init__(self, gender):
+ def __init__(self, gender, name="welp"):
super(Human, self).__init__()
self.gender = gender
-
- def handle_precept(self, precept):
- if is_female(precept):
- self.blackboard.post("sees_lady", True)
-
- return super(Human, self).handle_precept(precept)
+ self.name = name
def __repr__(self):
return "" % self.gender
+
def run_once():
- pirate = Human("Male")
+ import pygame
- # lets load some pirate commands
- load_commands(pirate, os.path.join("npc", "pirate"))
+ pygame.init()
+ screen = pygame.display.set_mode((480, 480))
+ pygame.display.set_caption('Pirate Island')
- pirate.current_action = global_actions["idle"].action_class(pirate, global_actions["idle"])
- pirate.current_action.start()
+ screen_buf = pygame.Surface((240, 240))
+
+ # make our little cove
+ formosa = TiledEnvironment("formosa.tmx")
- # the idle goal will always be run when it has nothing better to do
- pirate.add_goal(SimpleGoal("idle", value=.1))
+ time = 0
+ interactive = 1
- # he has high aspirations in life
- # NOTE: he needs to be drunk to get laid (see action map: actions.csv)
- pirate.add_goal(SimpleGoal("is_drunk"))
- pirate.add_goal(SimpleGoal("is_laid"))
- #pirate.add_goal(EvalGoal("money >= 50"))
+ run = True
+ while run:
+ stdout.write("=============== STEP {} ===============\n".format(time))
- # make our little cove
- formosa = XYEnvironment()
+ formosa.run(1)
+
+ if time == 1:
+ pirate = Human("Male", "jack")
+ load_commands(pirate, os.path.join("npc", "pirate"))
+ #pirate.add_goal(SimpleGoal(is_idle=True))
+ pirate.add_goal(SimpleGoal(is_drunk=True))
+ formosa.add_thing(pirate)
+
+ elif time == 3:
+ rum = ObjectBase("rum")
+ #pirate.add_goal(HasItemGoal(pirate, rum))
+ formosa.add_thing(rum)
+
+ elif time == 5:
+ formosa.move(rum, pirate.position)
+ pass
+
+ elif time == 6:
+ wench = Human("Female", "wench")
+ formosa.add_thing(wench)
+
+ screen_buf.fill((0,128,255))
+ formosa.render(screen_buf)
+ pygame.transform.scale2x(screen_buf, screen)
+ pygame.display.flip()
+
+ stdout.write("\nPRESS ANY KEY TO CONTINUE".format(time))
+ stdout.flush()
+
+ # wait for a keypress
+ try:
+ if interactive:
+ event = pygame.event.wait()
+ else:
+ event = pygame.event.poll()
+ while event:
+ if event.type == QUIT:
+ run = False
+ break
+
+ if not interactive: break
+
+ if event.type == KEYDOWN:
+ if event.key == K_ESCAPE:
+ run = False
+ break
- # add the pirate
- formosa.add_thing(pirate)
+ if event.type == KEYUP: break
+
+ if interactive:
+ event = pygame.event.wait()
+ else:
+ event = pygame.event.poll()
- # simulate the pirate idling
- formosa.run(15)
+ except KeyboardInterrupt:
+ run = False
- # add a female
- print "=== wench appears"
- wench = Human("Female")
- formosa.add_thing(wench)
+ stdout.write("\n\n");
+ time += 1
+
+ if time == 8: run = False
- # simulate with the pirate and female
- formosa.run(15)
if __name__ == "__main__":
import cProfile
import pstats
- cProfile.run('run_once()', "pirate.prof")
+
+ try:
+ cProfile.run('run_once()', "pirate.prof")
+ except KeyboardInterrupt:
+ pass
p = pstats.Stats("pirate.prof")
p.strip_dirs()
diff --git a/pygoap.py b/pygoap.py
deleted file mode 100644
index c02ffa1..0000000
--- a/pygoap.py
+++ /dev/null
@@ -1,1083 +0,0 @@
-"""
-Copyright 2010, Leif Theden
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see .
-"""
-
-"""
-this is a rewrite of my old goap library in an attempt to make
-it less of a mess of classes and try to consolidate everything
-into one class and to make it more event-based
-
-the current plan is to have a subprocess monitoring the
-environment and doing all the pygoap stuff in a seperate
-process.
-
-how to handle ai? multiprocessing to get around the GIL in CPython.
-why not threads? because we are CPU restricted, not IO.
-
-memory managers and blackboards should be related.
-this will allow for expectations, since a memory can be simulated in the future
-
-memory:
-should be a heap
-memory added will be wrapped with a counter
-everytime a memory is fetched, the counter will be added
-eventually, memories not being used will be removed
-and the counters will be reset
-
-memories should be a tree
-if a memory is being added that is similiar to an existing memory,
-then the existing memory will be updated, rather than replaced.
-
-to keep memory usage down, goals and effects are only instanced once.
-so, do not do anything crazy, like attempting to change a goal at runtime,
-since it will effect avery agent that relies on it.
-
-to handle idle actions:
- an idle action should have a very low cost associated with it
- the planner will then always choose this when there is nothing better to do
-
-
-"""
-
-from heapq import heappop, heappush
-from heapq import heappushpop
-from collections import deque
-
-import random
-import sys
-import traceback
-
-ACTIONSTATE_NOT_STARTED = 0
-ACTIONSTATE_FINISHED = 1
-ACTIONSTATE_RUNNING = 2
-ACTIONSTATE_PAUSED = 3
-ACTIONSTATE_BAILED = 4
-ACTIONSTATE_FAILED = 5
-
-
-DEBUG = False
-
-def dlog(text):
- print text
-
-class Precept(object):
- def __init__(self, *arg, **kwargs):
- self.__dict__.update(kwargs)
-
- def __repr__(self):
- return "" % self.__dict__
-
-def distance((ax, ay), (bx, by)):
- "The distance between two (x, y) points."
- return math.hypot((ax - bx), (ay - by))
-
-def distance2((ax, ay), (bx, by)):
- "The square of the distance between two (x, y) points."
- return (ax - bx)**2 + (ay - by)**2
-
-def clip(vector, lowest, highest):
- """Return vector, except if any element is less than the corresponding
- value of lowest or more than the corresponding value of highest, clip to
- those values.
- >>> clip((-1, 10), (0, 0), (9, 9))
- (0, 9)
- """
- return type(vector)(map(min, map(max, vector, lowest), highest))
-
-class Environment(object):
- """Abstract class representing an Environment. 'Real' Environment classes
- inherit from this.
- The environment keeps a list of .objects and .agents (which is a subset
- of .objects). Each agent has a .performance slot, initialized to 0.
- """
-
- def __init__(self, things=[], agents=[], time=0):
- self.time = time
- self.things = things
- self.agents = agents
-
- # TODO, if agents are passed, we need to init them, possibly
- # by sending the relivant precepts, (time, location, etc)
-
- self.action_que = []
-
- def default_location(self, object):
- """
- Default location to place a new object with unspecified location.
- """
- raise NotImplementedError
-
- def run(self, steps=1000):
- """
- Run the Environment for given number of time steps.
- """
- [ self.update(1) for step in xrange(steps) ]
-
- def add_thing(self, thing, location=None):
- """
- Add an object to the environment, setting its location. Also keep
- track of objects that are agents. Shouldn't need to override this.
- """
- thing.location = location or self.default_location(thing)
- self.things.append(thing)
-
- # add the agent
- if isinstance(thing, GoapAgent):
- thing.performance = 0
- thing.environment = self
- self.agents.append(thing)
-
- # should update vision for all interested agents (correctly, that is)
- [ self.look(a) for a in self.agents if a != thing ]
-
- def update(self, time_passed):
- """
- * Update our time
- * Update actions that may be running
- * Update all of our agents
- * Add actions to the que
- """
-
- #for a in self.agents:
- # print "agent:", a.blackboard.__dict__
-
- new_actions = []
-
- # update time
- self.time += time_passed
-
- self.action_que = [ a for a in self.action_que if a != None ]
-
- # update all the actions that may be running
- precepts = [ a.update(time_passed) for a in self.action_que ]
- precepts = [ p for p in precepts if p != None ]
-
- """
- # let agents know that they have finished an action
- new_actions.extend([ action.caller.handle_precept(
- Precept(sense="self", time=self.time, action=action))
- for action in self.action_que
- if action.state == ACTIONSTATE_FINISHED ])
- """
-
-
- # add the new actions
- self.action_que.extend(new_actions)
- self.action_que = [ a for a in self.action_que if a != None ]
-
- # remove actions that are completed
- self.action_que = [ a for a in self.action_que
- if a.state != ACTIONSTATE_FINISHED ]
-
- # send precepts of each action to the agents
- for p in precepts:
- actions = [ a.handle_precept(p) for a in self.agents ]
- self.action_que.extend([ a for a in actions if a != None ])
-
- # let all the agents know that time has passed
- t = Precept(sense="time", time=self.time)
- self.action_que.extend([ a.handle_precept(t) for a in self.agents ])
-
- # this is a band-aid until there is a fixed way to
- # manage actions returned from agents
- self.action_que = [ a for a in self.action_que if a != None ]
-
- # start any actions that are not started
- [ action.start() for action in self.action_que if action.state == ACTIONSTATE_NOT_STARTED ]
-
-class Pathfinding2D(object):
- def get_surrounding(self, location):
- """
- Return all locations around this one.
- """
-
- x, y = location
- return ((x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1), (x, y+1),
- (x+1, y-1), (x+1, y), (x+1, y+1))
-
- def calc_h(self, location1, location2):
- return distance(location1, location2)
-
-class XYEnvironment(Environment, Pathfinding2D):
- """
- This class is for environments on a 2D plane, with locations
- labelled by (x, y) points, either discrete or continuous. Agents
- perceive objects within a radius. Each agent in the environment
- has a .location slot which should be a location such as (0, 1),
- and a .holding slot, which should be a list of objects that are
- held
- """
-
- def __init__(self, width=10, height=10):
- super(XYEnvironment, self).__init__()
- self.width = width
- self.height = height
-
- def look(self, caller, direction=None, distance=None):
- """
- Simulate vision.
-
- In normal circumstances, all kinds of things would happen here,
- like ray traces. For now, assume all objects can see every
- other object
- """
- a = [ caller.handle_precept(
- Precept(sense="sight", thing=t, location=t.location))
- for t in self.things if t != caller ]
-
- for action in a:
- if isinstance(action, list):
- self.action_que.extend(action)
- else:
- self.action_que.append(action)
-
- def objects_at(self, location):
- """
- Return all objects exactly at a given location.
- """
- return [ obj for obj in self.things if obj.location == location ]
-
- def objects_near(self, location, radius):
- """
- Return all objects within radius of location.
- """
- radius2 = radius * radius
- return [ obj for obj in self.things
- if distance2(location, obj.location) <= radius2 ]
-
- def default_location(self, thing):
- return (random.randint(0, self.width), random.randint(0, self.height))
-
-def get_exception():
- cla, exc, trbk = sys.exc_info()
- return traceback.format_exception(cla, exc, trbk)
-
-class PyEval(object):
- """
- A safer way to evaluate strings.
-
- probably should do some preprocessing to make sure its really safe
- NOTE: might modify the dict bassed to it. (not really tested)
- """
-
- def make_dict(self, bb=None):
- safe_dict = {}
-
- # clear out builtins
- safe_dict["__builtins__"] = None
-
- if bb != None:
- # copy the dictionaries
- safe_dict.update(bb)
-
- return safe_dict
-
- # mostly for prereq's
- def do_eval(self, expr, bb):
- d = self.make_dict(bb)
-
- #print "EVAL:", expr
- try:
- result = eval(expr, d)
- return result
- except NameError:
- # we are missing a value we need to evaluate the expression
- return 0
-
- # mostly for effect's
- def do_exec(self, expr, bb):
- d = self.make_dict(bb)
-
- #print "EXEC:", expr
-
- try:
- # a little less secure
- exec expr in d
-
- #except NameError as detail:
- # missing a value needed for the statement
- # we make a default value here, but really we should ask the agent
- # if it knows that to do with this name, maybe it knows....
-
- # get name of missing variable
- # name = detail[0].split()[1].strip('\'')
- # d[name] = 0
- # exec self.expr in d
-
- except NameError:
- detail = get_exception()
- name = detail[3].split('\'')[1]
- d[name] = 0
- exec expr in d
-
- # the bb was modified
- for key, value in d.items():
- if key[:2] != "__":
- bb[key] = value
-
- return True
-
- def measured_eval(self, expr_list, bb, goal_expr):
- """
- do a normal exec, but compare the results
- of the expr and return a fractional value
- that represents how effective the expr is
- """
-
- # prepare our working dict
- d0 = self.make_dict(bb)
-
- # build our test dict by evaluating each expression
- for expr in exec_list:
- # exec the expression we are testing
- finished = False
- while finished == False:
- finished = True
- try:
- exec exec_expr in d0
- except NameError as detail:
- finished = False
- name = detail[0].split()[1].strip('\'')
- d0[name] = 0
-
- return self.cmp_dict(self, d0, goal_expr)
-
- def cmp_bb(self, d, goal_expr):
-
- # this only works for simple expressions
- cmpop = (">", "<", ">=", "<=", "==")
-
- i = 0
- index = 0
- expr = goal_expr.split()
- while index == 0:
- try:
- index = expr.index(cmpop[i])
- except:
- i += 1
- if i > 5: break
-
- try:
- side0 = float(eval(" ".join(expr[:index]), d))
- side1 = float(eval(" ".join(expr[index+1:]), d))
- except NameError:
- return float(0)
-
- cmpop = cmpop[i]
-
- if (cmpop == ">") or (cmpop == ">="):
- if side0 == side1:
- v = 1.0
- elif side0 > side1:
- v = side0 / side1
- elif side0 < side1:
- if side0 == 0:
- v = 0
- else:
- v = 1 - ((side1 - side0) / side1)
-
- if v > 1: v = 1.0
- if v < 0: v = 0.0
-
- return v
-
- def cmp_bb2(self, d, goal_expr):
-
- # this only works for simple expressions
- cmpop = (">", "<", ">=", "<=", "==")
-
- i = 0
- index = 0
- expr = goal_expr.split()
- while index == 0:
- try:
- index = expr.index(cmpop[i])
- except:
- i += 1
- if i > 5: break
-
- try:
- side0 = float(eval(" ".join(expr[:index]), d))
- side1 = float(eval(" ".join(expr[index+1:]), d))
- except NameError:
- return float(0)
-
- cmpop = cmpop[i]
-
- if (cmpop == ">") or (cmpop == ">="):
- if side0 == 0: return side1
- v = 1 - ((side1 - side0) / side1)
- elif (cmpop == "<") or (cmpop == "<="):
- if side1 == 0: return side0
- v = (side0 - side1) / side0
-
- return v
-
-
- def __str__(self):
- return "" % self.expr
- return v0 >= v1
-
-def calcG(node):
- cost = node.cost
- while node.parent != None:
- node = node.parent
- cost += node.cost
- return cost
-
-class PlanningNode(object):
- """
- each node has a copy of a bb (self.bb_delta) in order to simulate a plan.
- """
-
- def __init__(self, parent, obj, cost, h, bb=None, touch=True):
- self.parent = parent
- self.obj = obj
- self.cost = cost
- self.g = calcG(self)
- self.h = h
-
- self.bb_delta = {}
-
- if parent != None:
- self.bb_delta.update(parent.bb_delta)
- elif bb != None:
- self.bb_delta.update(bb)
-
- if touch: self.obj.touch(self.bb_delta)
-
- def __repr__(self):
- try:
- return "" % \
- (self.obj, self.cost, self.parent.obj, self.bb_delta.tags())
- except AttributeError:
- return "" % \
- (self.obj, self.cost, self.bb_delta.tags())
-
-
-class BasicActionPrereq(object):
- """
- Basic - just look for the presence of a tag on the bb.
- """
-
- def __init__(self, prereq):
- self.prereq = prereq
-
- def valid(self, bb):
- """
- Given the bb, can we run this action?
- """
-
- if (self.prereq == None) or (self.prereq == ""):
- return 1.0
- elif self.prereq in bb.keys():
- return 1.0
- else:
- return 0.0
-
- def __repr__(self):
- return "" % self.prereq
-
-class ExtendedActionPrereq(object):
- """
- These classes can use strings that evaluate variables on the blackboard.
- """
-
- def __init__(self, prereq):
- self.prereq = prereq
-
- def valid(self, bb):
- #e = PyEval()
- #return e.do_eval(self.prereq, bb)
-
- e = PyEval()
- d = {}
- d.update(bb)
- return e.cmp_bb(d, self.prereq)
-
- def __repr__(self):
- return "" % self.prereq
-
-class BasicActionEffect(object):
- """
- Basic - Simply post a tag with True as the value.
- """
-
- def __init__(self, effect):
- self.effect = effect
-
- def touch(self, bb):
- bb[self.effect] = True
-
- def __repr__(self):
- return "" % self.effect
-
-class ExtendedActionEffect(object):
- """
- Extended - Use PyEval.
- """
-
- def __init__(self, effect):
- self.effect = effect
-
- def touch(self, bb):
- e = PyEval()
- bb = e.do_exec(self.effect, bb)
-
- def __repr__(self):
- return "" % self.effect
-
-
-class GoalBase(object):
- """
- Goals:
- can be satisfied.
- can be valid
- """
-
- def __init__(self, s=None, r=None, value=1):
- self.satisfies = s
- self.relevancy = r
- self.value = value
-
- def get_relevancy(self, bb):
- """
- Return a float 0-1 on how "relevent" this goal is.
- Should be subclassed =)
- """
- raise NotImplementedError
-
- def satisfied(self, bb):
- """
- Test whether or not this goal has been satisfied
- """
- raise NotImplementedError
-
- def __repr__(self):
- return "" % self.satisfies
-
-class AlwaysSatisfiedGoal(GoalBase):
- """
- goal will never be satisfied.
-
- use for an idle condition
- """
-
- def get_relevancy(self, bb):
- return self.value
-
- def satisfied(self, bb):
- return 1.0
-
-class SimpleGoal(GoalBase):
- """
- Uses flags on a blackboard to test goals.
- """
-
- def get_relevancy(self, bb):
- if self.satisfies in bb.tags():
- return 0.0
- else:
- return self.value
-
- def satisfied(self, bb):
- try:
- bb[self.satisfies]
- except KeyError:
- return 0.0
- else:
- return 1.0
-
-class EvalGoal(GoalBase):
- """
- This goal will use PyEval objects to return
- a fractional value on how satisfied it is.
-
- These will enable the planner to execute
- a plan, even if it is not the best one.
- """
-
- def __init__(self, expr):
- self.expr = expr
- self.satisfies = "non"
- self.relevancy = 0
- self.value = 1
-
- def get_relevancy(self, bb):
- return .5
-
- def satisfied(self, bb):
- e = PyEval()
- d = {}
- d.update(bb)
- return e.cmp_bb(d, self.expr)
-
-class SimpleActionNode(object):
- """
- action:
- has a prereuisite
- has a effect
- has a reference to a class to "do" the action
-
- this is like a singleton class, to cut down on memory usage
-
- TODO:
- use XML to store the action's data.
- names matched as locals inside the bb passed
- """
-
- def __init__(self, name, p=None, e=None):
- self.name = name
- self.prereqs = []
- self.effects = []
-
- # costs.
- self.time_cost = 0
- self.move_cost = 0
-
- self.start_func = None
-
- try:
- self.effects.extend(e)
- except:
- self.effects.append(e)
-
- try:
- self.prereqs.extend(p)
- except:
- self.prereqs.append(p)
-
- def set_action_class(self, klass):
- self.action_class = klass
-
- def valid(self, bb):
- """
- return a float from 0-1 that describes how valid this action is.
-
- validity of an action is a measurement of how effective the action
- will be if it is completed successfully.
-
- if any of the prereqs are not partially valid ( >0 ) then will
- return 0
-
- this value will be used in planning.
-
- for many actions a simple 0 or 1 will work. for actions which
- modify numerical values, it may be useful to return a fractional
- value.
- """
-
- total = [ i.valid(bb) for i in self.prereqs ]
- if 0 in total: return 0
- return float(sum(total)) / len(self.prereqs)
-
- # this is run when the action is succesful
- # do something on the blackboard (varies by subclass)
- def touch(self, bb):
- [ i.touch(bb) for i in self.effects ]
-
- def __repr__(self):
- return "" % self.name
-
-class CallableAction(object):
- """
- callable action class.
-
- subclass this class to implement the code side of actions.
- for the most part, "start" and "update" will be the most
- important methods to use
- """
-
- def __init__(self, caller, validator):
- self.caller = caller
- self.validator = validator
- self.state = ACTIONSTATE_NOT_STARTED
-
- def touch(self):
- """
- mark the parent's blackboard to reflect changes
- of a successful execution
- """
- self.validator.touch(self.caller.blackboard.tagDB)
-
- def valid(self, do=False):
- """
- make sure the action is able to be started
- """
- return self.validator.valid(self.caller.blackboard.tagDB)
-
- def start(self):
- """
- start running the action
- """
- self.state = ACTIONSTATE_RUNNING
- print self.caller, "is starting to", self.__class__.__name__
-
- def update(self, time):
- """
- actions which occur over time should implement
- this method.
-
- if the action does not need more that one cycle, then
- you should use the calledonce class
- """
-
- def fail(self, reason=None):
- """
- maybe what we planned to do didn't work for whatever reason
- """
- self.state = ACTIONSTATE_FAILED
-
- def bail(self):
- """
- stop the action without the ability to complete or continue
- """
- self.state = ACTIONSTATE_BAILED
-
- def finish(self):
- """
- the planned action was completed and the result
- is correct
- """
- if self.state == ACTIONSTATE_RUNNING:
- self.state = ACTIONSTATE_FINISHED
- self.touch()
- print self.caller, "is finshed", self.__class__.__name__
-
- def ok_finish(self):
- """
- determine if the action can finish now
- if cannot finish now, then the action
- will bail if it is forced to finish.
- """
-
- return self.state == ACTIONSTATE_FINISHED
-
-class CalledOnceAction(CallableAction):
- """
- Is finished imediatly when started.
- """
- def start(self):
- # valid might return a value less than 1
- # this means that some of the prereqs are not
- # completely satisfied.
- # since we want everything to be completely
- # satisfied, we require valid == 1.
- if self.valid() == 1:
- CallableAction.start(self)
- CallableAction.finish(self)
- else:
- self.fail()
-
- def update(self, time):
- pass
-
-class PausableAction(CallableAction):
- def pause(self):
- self.state = ACTIONSTATE_PAUSED
-
-class Blackboard(object):
- """
- Memory device meant to be shared among Agents
- used for planning.
- data should not contain references to other objects
- references will cause the planning phase to use inconsistant data
- only store copies of objects
-
- blackboards that will be shared amongst agents will have to
- store the creating agent in the memories metadata
-
- class attribute access may be emulated and use a blackboard
- as the dict, instead of (self.__dict__)
-
- this will allow game object programming in a way that does not
- have to deal with details of the AI subsystem
-
- planning may introduce it's own variables onto the blackboard
- they will always have a "_goap_" prefix
- """
-
- def __init__(self):
- self.memory = []
- self.tagDB = {}
-
- def tags(self):
- return self.tagDB.keys()
-
- def add(self, precept, tags=[]):
- self.memory.append(precept)
- for tag in tags:
- self.tagDB[tag] = precept
-
- def post(self, tag, value):
- self.tagDB[tag] = value
-
- def read(self, tag):
- return self.tagDB[tag]
-
- def search(self):
- return self.memory[:]
-
-def time_filter(precept):
- if precept.sense == "time":
- return None
- else:
- return precept
-
-def get_children(node, actions, duplicate_parent=False):
-
- # do some stuff to determine the children of this node
- if duplicate_parent:
- skip = []
- else:
- skip = [node.obj]
-
- node0 = node
- while node0.parent != None:
- node0 = node0.parent
- skip.append(node0.obj)
-
- children = []
- for a in [ i for i in actions if i not in skip]:
- score = a.valid((node.bb_delta))
- if score > 0: children.append((score, PlanningNode(node, a, 1, 1)))
-
- children.sort()
-
- return children
-
-class GoapAgent(object):
- """
- AI thingy
-
- every agent should have at least one goal (otherwise, why use it?)
- """
-
- # this will set this class to listen for this type of precept
- interested = []
-
- def __init__(self):
- self.idle_timeout = 30
- self.blackboard = Blackboard()
-
- self.current_action = None # what action is being carried out now.
- # This must be a real action.
- # reccommened to use a idle action if nothin else
-
- self.goals = [] # all goals this instance will use
- self.invalid_goals = [] # goals that cannot be satisfied now
- self.filters = [] # filter precepts. see the method of same name.
- self.actions = [] # all actions this npc can perform
- self.plan = []
- self.current_goal = None
-
- # handle time precepts intelligently
- self.filters.append(time_filter)
-
- def add_goal(self, goal):
- self.goals.append(goal)
-
- def remove_goal(self, goal):
- self.goals.remove(goal)
-
- def invalidate_goal(self, goal):
- self.invalid_goals.append(goal)
-
- def add_action(self, action):
- self.actions.append(action)
-
- def remove_action(self, action):
- self.actions.remove(action)
-
- def filter_precept(self, precept):
- """
- precepts can be put through filters to change them.
- this can be used to simulate errors in judgement by the agent.
- """
-
- for f in self.filters: precept = f(precept)
- return precept
-
- def handle_precept(self, precept):
- """
- do something with the precept
- the goals will be re-evaulated based on our new precept, if any
-
- also managed the plan, so this should be called occasionally
- # use time precepts
-
- """
-
- # give our filters a chance to change the precept
- precept = self.filter_precept(precept)
-
- # our filters may have caused us to ignore the precept
- if precept != None:
- self.blackboard.add(precept)
- self.invalid_goals = []
-
- return self.replan()
-
- def replan(self):
- if self.current_action == None: return None
-
- # use this oppurtunity to manage our plan
- if (self.plan == []) and (self.current_action.ok_finish()):
- self.plan = self.get_plan()
-
- if self.plan != []:
-
- if self.current_action.state == ACTIONSTATE_FINISHED:
- action = self.plan.pop()
- self.current_action = action.action_class(self, action)
- return self.current_action
-
- elif self.current_action.state == ACTIONSTATE_FAILED:
- self.plan = self.get_plan()
-
- elif self.current_action.state == ACTIONSTATE_RUNNING:
- if self.current_action.__class__ == self.plan[-1].action_class:
- self.plan.pop()
- else:
- if self.current_action.ok_finish():
- self.current_action.finish()
-
-
-
- def get_plan(self):
- """
- pick a goal and plan if able to
-
- consolidated to remove function calls....
- """
-
- # UPDATE THE GOALS
- s = [(goal.get_relevancy(self.blackboard), goal)
- for goal in self.goals
- if goal not in self.invalid_goals]
-
- # SORT BY RELEVANCY
- s.sort(reverse=True)
- goals = [ i[1] for i in s ]
-
- for goal in goals:
- ok, plan = self.search_actions(self.actions, self.current_action.validator, self.blackboard, goal)
- if ok == False:
- print self, "cannot", goal
- self.invalidate_goal(goal)
- else:
- print self, "wants to", goal
- if len(plan) > 1: plan = plan[:-1]
- return plan
-
- return []
-
- def search_actions(self, actions, start_action, start_blackboard, goal):
- """
- actions must be a list of actions that can be used to satisfy the goal.
- start must be an action, blackboard represents current state
- goal must be a goal
-
- differs slightly from normal astar in that:
- there are no connections between nodes
- the state of the "map" changes as the nodes are traversed
- there is no closed list (behaves like a tree search)
-
- because of the state being changed as the algorithm progresses,
- state has be be saved with each node. also, there will be several
- copies of the nodes, since they will have different state.
-
- sometime, i will implement different factors that will adjust the data
- given to reflect different situations with the agent.
-
- for example, it would be nice sometimes to search for a action set that
- is very short and the time required is also short. this would be good
- for escapes.
-
- in other situations, if time is not a consideration, then maybe the best
- action set would be different.
-
- the tree thats built here will always have the same shape for each
- action map. we just need to priotize it.
- """
-
- pushback = None # the pushback is used to limit node access in the heap
- success = False
-
- keyNode = PlanningNode(None, start_action, 0, 0, start_blackboard.tagDB, False)
-
- heap = [(0, keyNode)]
-
- # the root can return a copy of itself, the others cannot
- return_parent = 1
-
- while len(heap) != 0:
-
- # get the best node. if we have a pushback, then push it and pop the best
- if pushback == None:
- keyNode = heappop(heap)[1]
- else:
- keyNode = heappushpop(heap, (pushback.g + pushback.h, pushback))[1]
- pushback = None
-
- # if our goal is satisfied, then stop
- #if (goal.satisfied(keyNode.bb_delta)) and (return_parent == 0):
- if goal.satisfied(keyNode.bb_delta):
- success = True
- break
-
- # go through each child and determine the best one
- for score, child in get_children(keyNode, actions, return_parent):
- if child in heap:
- possG = keyNode.g + child.cost
- if (possG < child.g):
- child.parent = keyNode
- child.g = calcG(child)
- # TODO: update the h score
- else:
- # add node to our heap, using pushpack if needed
- if pushback == None:
- heappush(heap, (child.g + child.h, child))
- else:
- heappush(heap, (pushback.g + pushback.h, pushback))
- pushpack = child
-
- return_parent = 0
-
- if success:
- #if keyNode.parent == None: return True, []
- path0 = [keyNode.obj]
- path1 = [keyNode]
- while keyNode.parent != None:
- keyNode = keyNode.parent
- path0.append(keyNode.obj)
- path1.append(keyNode)
-
- # determine if we have suboptimal goals
- for a in path1:
- if a.parent != None:
- prereqs = [ (p.valid(a.parent.bb_delta), p) for p in a.obj.prereqs ]
- failed = [ p[1] for p in prereqs if p[0] < 1.0 ]
- if len(failed) > 0:
- new_goal = EvalGoal(failed[0].prereq)
- new_plan = self.search_actions(actions, start_action, start_blackboard, new_goal)
- return new_plan
-
- return True, path0
- else:
- return False, []
-
-
diff --git a/pygoap/.README.swp b/pygoap/.README.swp
new file mode 100644
index 0000000..d155450
Binary files /dev/null and b/pygoap/.README.swp differ
diff --git a/pygoap/.actions.py.swp b/pygoap/.actions.py.swp
new file mode 100644
index 0000000..a79a12a
Binary files /dev/null and b/pygoap/.actions.py.swp differ
diff --git a/pygoap/.agent.py.swp b/pygoap/.agent.py.swp
new file mode 100644
index 0000000..4621199
Binary files /dev/null and b/pygoap/.agent.py.swp differ
diff --git a/pygoap/.blackboard.py.swp b/pygoap/.blackboard.py.swp
new file mode 100644
index 0000000..b1a490a
Binary files /dev/null and b/pygoap/.blackboard.py.swp differ
diff --git a/pygoap/.environment.py.swp b/pygoap/.environment.py.swp
new file mode 100644
index 0000000..2a0087b
Binary files /dev/null and b/pygoap/.environment.py.swp differ
diff --git a/pygoap/.environment2d.py.swp b/pygoap/.environment2d.py.swp
new file mode 100644
index 0000000..73b6ab1
Binary files /dev/null and b/pygoap/.environment2d.py.swp differ
diff --git a/pygoap/.goals.py.swp b/pygoap/.goals.py.swp
new file mode 100644
index 0000000..b9c0baa
Binary files /dev/null and b/pygoap/.goals.py.swp differ
diff --git a/pygoap/.goaltests.py.swp b/pygoap/.goaltests.py.swp
new file mode 100644
index 0000000..e04e46c
Binary files /dev/null and b/pygoap/.goaltests.py.swp differ
diff --git a/pygoap/.planning.py.swp b/pygoap/.planning.py.swp
new file mode 100644
index 0000000..d81239c
Binary files /dev/null and b/pygoap/.planning.py.swp differ
diff --git a/pygoap/.tiledenvironment.py.swp b/pygoap/.tiledenvironment.py.swp
new file mode 100644
index 0000000..645a8e9
Binary files /dev/null and b/pygoap/.tiledenvironment.py.swp differ
diff --git a/pygoap/README b/pygoap/README
new file mode 100644
index 0000000..5ff67eb
--- /dev/null
+++ b/pygoap/README
@@ -0,0 +1,102 @@
+Copyright 2010, Leif Theden
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+
+
+
+
+
+how to handle ai? multiprocessing to get around the GIL in CPython.
+why not threads? because we are CPU restricted, not IO.
+
+memory managers and blackboards should be related.
+this will allow for expectations, since a memory can be simulated in the future
+
+memory:
+should be a heap
+memory added will be wrapped with a counter
+every time a memory is fetched, the counter will be incremented
+eventually, memories not being used will be removed
+and the counters will be reset
+
+memories should be a tree
+if a memory is being added that is similar to an existing memory,
+then the existing memory will be updated, rather than replaced.
+
+since goals and prereqs share a common function, "valid", it makes sense to
+make them subclasses of a common class.
+
+looking at actions.csv, it is easy to see that the behaviour of the agent will
+be largely dependent on how well the action map is defined. with the little
+pirate demo, it is not difficult to model his behaviour, but with larger, more
+complex agents, it could quickly become a huge task to write and verify the
+action map.
+
+with some extra steps, i would like to make it possible that the agent can
+infer the prereq's to a goal through clues provided within the objects that the
+agent interacts with, rather than defining them within the class. i can foresee
+a performance penalty for this, but that could be offset by constructing
+training environments for the agent and then storing the action map that the
+agent creates during training.
+
+the planner:
+GOAP calls for a heuristic to be used to find the optimal solution and to reduce
+the number of checks made. in a physical environment where a star is used,
+it makes sense to just find a vector from the current searched node to the
+goal, but in action planning, there is no spatial dimension where a simple
+solution like that can be used.
+
+without a heuristic, a* is just a tree search. the heuristic will increase the
+efficiency of the planner and possibly give more consistent results. it can
+also be used to guide an agent's behaviour by manipulating some values.
+
+for now, while testing and building the library, the h value will not be used.
+when the library is more complete, it would make sense to build a complete
+agent, then construct a set of artificial scenarios to train the agent.
+based on data from the scenarios, it could be possible to hardcode the h
+values. the planner could then be optimised for certain scenarios.
+
+This module contains the most commonly used parts of the system. Classes that
+have many related sibling classes are in other modules.
+
+
+since planning is done on the blackboard and i would like agents to be able to
+make guesses, or plans about other agents, then agents will have to somehow be
+able to be stored on and manipulated on a blackboard. this may mean that
+agents and precepts will be the same thing
+
+1/15/12:
+overhauled the concepts of goals, prereqs, and effects and rolled them into
+one class. with the new system of instanced actions, it makes design sense to
+consolidate them, since they all have complementary functionality. from a
+performance standpoint, it may make sense to keep them separate, but this way is
+much easier to conceptualize in your mind, and i am not making a system that
+is performance sensitive....this is python.
+
+simplify holding/inventory:
+ an objects location should always be a tuple of:
+ ( holding object, position )
+
+this will make position very simple to sort. the position in the tuple should
+be a value that the holding object can make sense of, for example, an
+environment might expect a zone, and (x,y), while an agent would want an index
+number in their inventory.
+
+a side effect will be that location goals and holding goals can be consolidated
+into one function.
+
+new precepts may render a current plan invalid. to account for this, an agent
+will replan every time it receives a precept. a better way would be to tag a
+type of precept and then if a new one that directly relates to the plan arrives
+then replan.
diff --git a/pygoap/__init__.py b/pygoap/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/pygoap/__init__.py
@@ -0,0 +1 @@
+
diff --git a/pygoap/actions.py b/pygoap/actions.py
new file mode 100644
index 0000000..e7f1075
--- /dev/null
+++ b/pygoap/actions.py
@@ -0,0 +1,194 @@
+"""
+These are the building blocks for creating pyGOAP agents that are able to
+interact with their environment in a meaningful way.
+
+When actions are updated, they can return a precept for the environment to
+process. The action can emit a sound, sight, or anything else for other
+objects to consume.
+
+These classes will be known to an agent, and chosen by the planner as a means
+to satisfy the current goal. They will be instanced and then the agent will
+execute the action in some way, one after another.
+
+There is a delicate balance between the actions here and the "ActionEffects"
+and "ActionPrereqs" that you will have to master. A simple way to mentally
+distinguish them is that the prereqs and effects are clues to the planner to
+behave in a certain way and will never change anything except a blackboard.
+The action classes here are the 'guts' for the action and will modify the game
+environment in some meaningful way.
+
+
+Actions need to be split into ActionInstances and ActionBuilders.
+
+An ActionInstance's job is to work in a planner and to carry out actions.
+A ActionBuilder's job is to query the caller and return a list of suitable
+actions for the bb.
+"""
+
+from planning import *
+from actionstates import *
+import sys
+
+
+test_fail_msg = "some goal is returning None on a test, this is a bug."
+
+class ActionBuilder(object):
+ """
+ ActionBuilders examine a blackboard and return a list of actions
+ that can be successfully completed at the time.
+ """
+
+ def get_actions(self, caller, bb):
+ raise NotImplementedError
+
+ def __init__(self, **kwargs):
+ self.prereqs = []
+ self.effects = []
+ self.costs = {}
+
+ self.__dict__.update(kwargs)
+
+ self.setup()
+
+ def setup(self):
+ """
+ add the prereqs, effects, and costs here
+ override this
+ """
+ pass
+
+ def __repr__(self):
+ return "".format(self.__class__.__name__)
+
+
+class CallableAction(InstancedAction):
+ """
+ callable action class.
+
+ subclass this class to implement the code side of actions.
+ for the most part, "start" and "update" will be the most
+ important methods to overload.
+ """
+
+ def __init__(self, caller, **kwargs):
+ self.caller = caller
+ self.state = ACTIONSTATE_NOT_STARTED
+
+ self.prereqs = []
+ self.effects = []
+ self.costs = {}
+
+ self.__dict__.update(kwargs)
+
+ def test(self, bb=None):
+ """
+ make sure the action is able to be started
+ return a float from 0-1 that describes how valid this action is.
+
+ validity of an action is a measurement of how effective the action
+ will be if it is completed successfully.
+
+ if any of the prereqs are not partially valid ( >0 ) then will
+ return 0
+
+ this value will be used in planning.
+
+ for many actions a simple 0 or 1 will work. for actions which
+ modify numerical values, it may be useful to return a fractional
+ value.
+ """
+
+ # NOTE: may be better written with itertools
+
+ if len(self.prereqs) == 0: return 1.0
+ if bb == None: raise Exception
+ total = [ i.test(bb) for i in self.prereqs ]
+ print "[goal] {} test {}".format(self, total)
+ #if 0 in total: return 0
+ try:
+ return float(sum(total)) / len(self.prereqs)
+ except TypeError:
+ print zip(total, self.prereqs)
+ print test_fail_msg
+ sys.exit(1)
+
+
+ def touch(self, bb=None):
+ """
+ call when the planning phase is complete
+ """
+ if bb == None: bb = self.caller.bb
+ [ i.touch(bb) for i in self.effects ]
+
+
+ def start(self):
+ """
+ start running the action
+ """
+ self.state = ACTIONSTATE_RUNNING
+
+ def update(self, time):
+ """
+ actions which occur over time should implement
+ this method.
+
+ if the action does not need more that one cycle, then
+ you should use the calledonceaction class
+ """
+ pass
+
+ def fail(self, reason=None):
+ """
+ maybe what we planned to do didn't work for whatever reason
+ """
+ self.state = ACTIONSTATE_FAILED
+
+ def abort(self):
+ """
+ stop the action without the ability to complete or continue
+ """
+ self.state = ACTIONSTATE_BAILED
+
+ def finish(self):
+ """
+ the planned action was completed and the result is correct
+ """
+ if self.state == ACTIONSTATE_RUNNING:
+ self.state = ACTIONSTATE_FINISHED
+
+ def ok_finish(self):
+ """
+ determine if the action can finish now
+ if cannot finish now, then the action
+ should bail if it is forced to finish.
+ """
+ return self.state == ACTIONSTATE_FINISHED
+
+ def pause(self):
+ """
+ stop the action from updating. should be able to continue.
+ """
+ self.state = ACTIONSTATE_PAUSED
+
+
+class CalledOnceAction(CallableAction):
+ """
+ Is finished immediately when started.
+ """
+
+ def start(self):
+ # valid might return a value less than 1
+ # this means that some of the prereqs are not
+ # completely satisfied.
+ # since we want everything to be completely
+ # satisfied, we require valid == 1.
+ if self.test() == 1.0:
+ CallableAction.start(self)
+ CallableAction.finish(self)
+ else:
+ self.fail()
+
+ def update(self, time):
+ pass
+
+
diff --git a/pygoap/actionstates.py b/pygoap/actionstates.py
new file mode 100644
index 0000000..16c8070
--- /dev/null
+++ b/pygoap/actionstates.py
@@ -0,0 +1,6 @@
+ACTIONSTATE_NOT_STARTED = 0
+ACTIONSTATE_FINISHED = 1
+ACTIONSTATE_RUNNING = 2
+ACTIONSTATE_PAUSED = 3
+ACTIONSTATE_ABORTED = 4
+ACTIONSTATE_FAILED = 5
diff --git a/pygoap/agent.py b/pygoap/agent.py
new file mode 100644
index 0000000..a6d8cd1
--- /dev/null
+++ b/pygoap/agent.py
@@ -0,0 +1,177 @@
+"""
+fill in later
+"""
+
+from environment import ObjectBase
+from planning import plan, InstancedAction
+from blackboard import Blackboard, MemoryManager, Tag
+from actionstates import *
+
+
+NullAction = InstancedAction()
+
+
+# required to reduce memory usage
+def time_filter(precept):
+ if precept.sense == "time":
+ return None
+ else:
+ return precept
+
+
+class GoapAgent(ObjectBase):
+ """
+ AI Agent
+
+ every agent should have at least one goal (otherwise, why use it?)
+ inventories will be implemented using precepts and a list.
+
+ currently, only one action running concurrently is supported.
+ """
+
+ # this will set this class to listen for this type of precept
+ # not implemented yet
+ interested = []
+
+ def __init__(self):
+ self.idle_timeout = 30
+ self.bb = Blackboard()
+ self.mem_manager = MemoryManager(self)
+ self.planner = plan
+
+ self.current_goal = None
+
+ self.goals = [] # all goals this instance can use
+ self.invalid_goals = [] # goals that cannot be satisfied now
+ self.filters = [] # list of methods to use as a filter
+ self.actions = [] # all actions this npc can perform
+ self.plan = [] # list of actions to perform
+ # '-1' will be the action currently used
+
+ # this special filter will prevent time precepts from being stored
+ self.filters.append(time_filter)
+
+ def add(self, other, origin):
+ # we simulate the agent's knowledge of its inventory with precepts
+ p = Precept(sense="inventory")
+
+ # do the actual add
+ super(GoapAgent, self).add(other, origin)
+
+ def remove(self, obj):
+ # we simulate the agent's knowledge of its inventory with precepts
+ p = Precept(sense="inventory")
+
+ # do the actual remove
+ super(GoapAgent, self).remove(other, origin)
+
+ def add_goal(self, goal):
+ self.goals.append(goal)
+
+ def remove_goal(self, goal):
+ self.goals.remove(goal)
+
+ def add_action(self, action):
+ self.actions.append(action)
+
+ def remove_action(self, action):
+ self.actions.remove(action)
+
+ def filter_precept(self, precept):
+ """
+ precepts can be put through filters to change them.
+ this can be used to simulate errors in judgement by the agent.
+ """
+
+ for f in self.filters:
+ precept = f(precept)
+ if precept == None:
+ break
+
+ return precept
+
+ def handle_precept(self, pct):
+ """
+ used by the environment to feed the agent precepts.
+ agents can respond by sending back an action to take.
+ """
+
+ # give our filters a chance to change the precept
+ pct = self.filter_precept(pct)
+
+ # our filters may have caused us to ignore the precept
+ if pct == None: return None
+
+ print "[agent] {} recv'd pct {}".format(self, pct)
+
+ # this line has been added for debugging purposes
+ self.plan = []
+
+ if pct.sense == "position":
+ self.bb.post(Tag(position=pct.position, obj=pct.thing))
+
+ return self.next_action()
+
+ def replan(self):
+ """
+ force agent to re-evaluate goals and to formulate a plan
+ """
+
+ # get the relevancy of each goal according to the state of the agent
+ s = [ (g.get_relevancy(self.bb), g) for g in self.goals ]
+ s = [ g for g in s if g[0] > 0 ]
+ s.sort(reverse=True)
+
+ print "[agent] goals {}".format(s)
+
+ # starting for the most relevant goal, attempt to make a plan
+ for score, goal in s:
+ ok, plan = self.planner(
+ self,
+ self.actions,
+ self.current_action(),
+ self.bb,
+ goal)
+
+ if ok:
+ print "[agent] {} has planned to {}".format(self, goal)
+ pretty = list(reversed(plan[:]))
+ print "[agent] {} has plan {}".format(self, pretty)
+ return plan
+ else:
+ print "[agent] {} cannot {}".format(self, goal)
+
+ return []
+
+ def current_action(self):
+ try:
+ return self.plan[-1]
+ except IndexError:
+ return NullAction
+
+ def running_actions(self):
+ return self.current_action()
+
+ def next_action(self):
+ """
+ get the next action of the current plan
+ """
+
+ if self.plan == []:
+ self.plan = self.replan()
+
+ current_action = self.current_action()
+
+ # this action is done, so return the next one
+ if current_action.state == ACTIONSTATE_FINISHED:
+ return self.plan.pop()
+
+ # this action failed somehow
+ elif current_action.state == ACTIONSTATE_FAILED:
+ raise Exception, "action failed, don't know what to do now!"
+
+ # our action is still running, just run that
+ elif current_action.state == ACTIONSTATE_RUNNING:
+ return current_action
+
+
diff --git a/pygoap/blackboard.py b/pygoap/blackboard.py
new file mode 100644
index 0000000..6007eb5
--- /dev/null
+++ b/pygoap/blackboard.py
@@ -0,0 +1,169 @@
+"""
+Memories are stored precepts.
+A blackboard is a device to share information amongst actions.
+This implementation uses sqlite3 as a backend for storing memories.
+"""
+
+import sqlite3
+from sqlalchemy import *
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relation, sessionmaker
+
+from collections import defaultdict
+
+DEBUG = 0
+
+Base = declarative_base()
+
+class Tag(object):
+ """
+ simple object for storing data on a blackboard
+ """
+
+ def __init__(self, **kwargs):
+ if 'kw' in kwargs.keys():
+ del kwargs['kw']
+ self.kw = kwargs
+ else:
+ self.kw = kwargs
+
+ def __repr__(self):
+ return "".format(self.kw)
+
+class Memory(Base):
+ __tablename__ = 'memory'
+
+ id = Column(Integer, primary_key=True)
+ owner = Column(String(255))
+ value = Column(String(255))
+
+ def __init__(self, owner, value):
+ self.owner = owner
+ self.value = value
+
+
+# initialize our database
+engine = create_engine('sqlite://')
+Base.metadata.create_all(engine)
+
+
+class MemoryManager(object):
+ """
+ a memory manager's purpose is to store precepts.
+
+ memories here should be able to be recalled quickly. like a blackboard,
+ this class is designed to have many users (not thread safe now).
+ """
+
+ def __init__(self, owner=None):
+ self.owner = owner
+
+ def add(self, precept, confidence=1.0):
+ """
+ Precepts may have a confidence level associated with them as a metric
+ for sorting out precepts that may not be reliable.
+
+ This mechanism primarily exists for users of a shared blackboard where
+ they may post conflicting information.
+ """
+
+ Session = sessionmaker(bind=engine)
+ session = Session()
+
+ m = Memory(None, "")
+
+ try:
+ session.add(m)
+ session.commit()
+ except:
+ if DEBUG: print "error:", m
+ session.rollback()
+ raise
+
+ def search(self, tag, owner=None):
+ alldata = session.query(Memory).all()
+ for somedata in alldata:
+ if DEBUG: print somedata
+
+
+class Blackboard(object):
+ """
+ a blackboard is an abstract memory device.
+
+ alternative, more robust solution would be a xml backend or database
+
+ tags belong to a set of values that are known by the actions that an actor
+ would find useful. tag names are simple strings and have a value
+ associated with them. for simplicity, a blackboard could be used like a
+ standard python dictionary.
+
+ shared blackboards violate reality in that multiple agents share the same
+ thoughts, to extend the metaphor. but, the advantage of this is that in
+ a real-time simulation, it gives the player the impression that the agents
+ are able to collaborate in some meaningful way, without a significant
+ impact in performance.
+
+ that being said, i have chosen to restrict blackboards to a per-agent
+ basis. this library is meant for rpgs, where the action isn't real-time
+ and would require a more realistic simulation of intelligence.
+
+ however, i will still develop blackboards with the intention that they are
+ shared, so that in the future, it will be easier to simulate the borg-mind.
+ """
+
+ def __init__(self):
+ self.memory = []
+
+ def __eq__(self, other):
+ if isinstance(other, Blackboard):
+ return self.memory == other.memory
+ else:
+ return False
+
+ def post(self, tag):
+ if not isinstance(tag, Tag):
+ m="Only instances of tag objects can be stored on a blackboard"
+ raise ValueError, m
+
+ d = tag.kw.copy()
+ self.memory.append(d)
+
+ def read(self, *args, **kwargs):
+ """
+ return any data that match the keywords in the function call
+ returns a list of dictionaries
+ """
+
+ if (args == ()) and (kwargs == {}):
+ return self.memory
+
+ tags = []
+ r = []
+
+ if DEBUG: print "[bb] args {}".format(args)
+ if DEBUG: print "[bb] kwargs {}".format(kwargs)
+
+ def check_args(tag):
+ if args == ():
+ return True
+ else:
+ keys = tag.keys()
+ return all([ a in keys for a in args ])
+
+ def check_kwargs(tag):
+ if kwargs == {}:
+ return True
+ else:
+ raise ValueError, "Blackboards do not support search...yet"
+
+ for tag in self.memory:
+ if DEBUG: print "[bb] chk args {} {}".format(tag, check_args(tag))
+ if DEBUG: print "[bb] chk kw {} {}".format(tag, check_kwargs(tag))
+ if check_args(tag) and check_kwargs(tag):
+ r.append(tag)
+
+ #r.reverse()
+ return r
+
+ def update(self, other):
+ self.memory.extend(other.memory)
diff --git a/pygoap/environment.py b/pygoap/environment.py
new file mode 100644
index 0000000..48a16df
--- /dev/null
+++ b/pygoap/environment.py
@@ -0,0 +1,186 @@
+"""
+Since a pyGOAP agent relies on cues from the environment when planning, having
+a stable and efficient virtual environment is paramount.
+
+When coding your game or simulation, you can think of the environment as the
+conduit that connects your actors on screen to their simulated thoughts. This
+environment simply provides enough basic information to the agents to work. It
+is up to you to make it useful.
+
+objects should be able to produce actions that would be useful. this concept
+comes from the sims, where each agent doesn't need to know how to use every
+object, but can instead query the object for things to do with it.
+"""
+
+from actionstates import *
+from objectflags import *
+from itertools import chain, repeat, product, izip
+
+
+
class ObjectBase(object):
    """
    class for objects that agents can interact with

    keeps a name and an inventory of other objects.
    """

    def __init__(self, name):
        self.name = name
        self.inventory = []

    def handle_precept(self, precept):
        # subclasses react to precepts here; the base object ignores them
        pass

    def add(self, other, origin):
        """
        add something to this object's inventory
        the object must have an origin.
        the original position of the object will lose this object:
        dont remove it manually!
        """

        origin.inventory.remove(other)
        self.inventory.append(other)

    def remove(self, obj):
        """
        remove something from this object's inventory
        """

        # BUG FIX: previously removed the undefined name 'other', which
        # raised NameError whenever this method was called
        self.inventory.remove(obj)

    def get_actions(self, other):
        """
        generate a list of actions that could be used with this object

        subclasses override this; the base object offers nothing to do.
        """
        return []

    def __repr__(self):
        # BUG FIX: the format string was empty, so repr() was always ""
        return "<ObjectBase: {}>".format(self.name)
+
+
class Precept(object):
    """
    This is the building block class for how an agent interacts with the
    simulated environment.

    any keyword arguments become attributes of the instance; positional
    arguments are accepted but ignored.
    """

    def __init__(self, *arg, **kwargs):
        self.__dict__.update(kwargs)

    def __repr__(self):
        # BUG FIX: the format string was empty, so repr() was always ""
        return "<Precept: %s>" % self.__dict__
+
+
+class Environment(object):
+ """Abstract class representing an Environment. 'Real' Environment classes
+ inherit from this.
+ The environment keeps a list of .objects and .agents (which is a subset
+ of .objects). Each agent has a .performance slot, initialized to 0.
+ """
+
+ def __init__(self, things=[], agents=[], time=0):
+ self.time = time
+ self.agents = []
+ self.things = []
+
+ [ self.add_thing(i) for i in things ]
+ [ self.add_thing(i) for i in agents ]
+
+ self.action_que = []
+
+ def default_position(self, object):
+ """
+ Default position to place a new object with unspecified position.
+ """
+
+ raise NotImplementedError
+
+ def run(self, steps=1000):
+ """
+ Run the Environment for given number of time steps.
+ """
+
+ [ self.update(1) for step in xrange(steps) ]
+
+ def add_thing(self, thing, position=None):
+ """
+ Add an object to the environment, setting its position. Also keep
+ track of objects that are agents. Shouldn't need to override this.
+ """
+
+ from agent import GoapAgent
+
+ thing.position = position or self.default_position(thing)
+ self.things.append(thing)
+
+ print "[env] adding {}".format(thing)
+
+ # add the agent
+ if isinstance(thing, GoapAgent):
+ self.agents.append(thing)
+ thing.performance = 0
+ thing.environment = self
+
+ # for simplicity, agents always know where they are
+ i = Precept(sense="position", thing=thing, position=thing.position)
+ thing.handle_precept(i)
+
+ # should update vision for all interested agents (correctly, that is)
+ [ self.look(a) for a in self.agents if a != thing ]
+
+ def update(self, time_passed):
+ """
+ * Update our time
+ * Let agents know time has passed
+ * Update actions that may be running
+ * Add new actions to the que
+
+ this could be rewritten.
+ """
+
+ # update time in the simulation
+ self.time += time_passed
+
+ # let all the agents know that time has passed
+ # bypass the modeler for simplicity
+ p = Precept(sense="time", time=self.time)
+ [ a.handle_precept(p) for a in self.agents ]
+
+ # update all the actions that may be running
+ precepts = [ a.update(time_passed) for a in self.action_que ]
+ precepts = [ p for p in precepts if not p == None ]
+
+ # get all the running actions for the agents
+ self.action_que = chain([ a.running_actions() for a in self.agents ])
+
+ # start any actions that are not started
+ [ action.start() for action in self.action_que
+ if action.state == ACTIONSTATE_NOT_STARTED ]
+
+ def broadcast_precepts(self, precepts, agents=None):
+ """
+ for effeciency, please use this for sending a list of precepts
+ """
+
+ if agents == None:
+ agents = self.agents
+
+ model = self.model_precept
+
+ for p in precepts:
+ [ a.handle_precept(model(p, a)) for a in agents ]
+
+ def model_precept(self, precept, other):
+ """
+ override this to model the way that precept objects move in the
+ simulation. by default, all precept objects will be distributed
+ indiscrimitely to all agents.
+
+ while this behaviour may be desireable for some types of precepts,
+ it doesn't make sense in many.
+
+ the two big thigs to model here would be vision and sound.
+ """
+
+ return precept
+
diff --git a/pygoap/environment2d.py b/pygoap/environment2d.py
new file mode 100644
index 0000000..d6c3df2
--- /dev/null
+++ b/pygoap/environment2d.py
@@ -0,0 +1,137 @@
+"""
+Since a pyGOAP agent relies on cues from the environment when planning, having
+a stable and efficient virtual environment is paramount. This environment is
+simply a placeholder and demonstration.
+
+When coding your game or simulation, you can think of the environment as the
+conduit that connects your actors on screen to their simulated thoughts. This
+environment simply provides enough basic information to the agents to work. It
+is up to you to make it useful.
+"""
+
+from pygoap.agent import GoapAgent
+from environment import Environment, Precept
+import random, math
+
+
+
def distance((ax, ay), (bx, by)):
    "The distance between two (x, y) points."
    # math.hypot does the squaring and square root in one call
    return math.hypot((ax - bx), (ay - by))
+
def distance2((ax, ay), (bx, by)):
    "The square of the distance between two (x, y) points."
    # avoids the sqrt; compare against a squared radius instead
    return (ax - bx)**2 + (ay - by)**2
+
def clip(vector, lowest, highest):
    """Return vector, except if any element is less than the corresponding
    value of lowest or more than the corresponding value of highest, clip to
    those values.
    >>> clip((-1, 10), (0, 0), (9, 9))
    (0, 9)
    """
    # clamp element-wise, then rebuild the caller's sequence type
    clamped = [min(max(v, lo), hi)
               for v, lo, hi in zip(vector, lowest, highest)]
    return type(vector)(clamped)
+
+
class Pathfinding2D(object):
    """Mixin with simple helpers for pathfinding on a 2d grid."""

    def get_surrounding(self, position):
        """
        Return the eight positions adjacent to this one.
        """

        x, y = position
        offsets = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
                   (0, 1), (1, -1), (1, 0), (1, 1))
        return tuple((x + dx, y + dy) for dx, dy in offsets)

    def calc_h(self, position1, position2):
        # straight-line distance serves as the A* heuristic
        return distance(position1, position2)
+
+
class XYEnvironment(Environment, Pathfinding2D):
    """
    This class is for environments on a 2D plane.

    This class is featured enough to run a simple simulation.
    """

    def __init__(self, width=10, height=10):
        super(XYEnvironment, self).__init__()
        self.width = width
        self.height = height

    def model_vision(self, precept, origin, terminus):
        # hook for line-of-sight modelling; currently a pass-through
        return precept

    def model_sound(self, precept, origin, terminus):
        # hook for sound-propagation modelling; currently a pass-through
        return precept

    def look(self, caller, direction=None, distance=None):
        """
        Simulate vision by sending precepts to the caller.

        direction and distance are currently ignored: every other thing
        in the environment is reported regardless of where it is.
        """

        # a more intelligent approach would limit the number of agents
        # to logical limit, ie: ones that could possibly been seen
        agents = self.things[:]
        agents.remove(caller)

        model = self.model_precept
        for a in agents:
            p = Precept(sense='position', thing=a, position=a.position)
            caller.handle_precept(model(p, caller))

    def move(self, thing, pos):
        """
        move an object in the world
        """

        thing.position = pos

        print "[env] move {} to {}".format(thing, pos)

        # moving changes what everybody else can see
        [ self.look(a) for a in self.agents if a != thing ]

    def objects_at(self, position):
        """
        Return all objects exactly at a given position.
        """

        return [ obj for obj in self.things if obj.position == position ]

    def objects_near(self, position, radius):
        """
        Return all objects within radius of position.
        """

        # compare squared distances to avoid the sqrt
        radius2 = radius * radius
        return [ obj for obj in self.things
            if distance2(position, obj.position) <= radius2 ]

    def default_position(self, thing):
        # NOTE(review): randint is inclusive on both ends, so a thing can
        # land at x == width or y == height -- confirm the intended bounds
        loc = (random.randint(0, self.width), random.randint(0, self.height))
        return (self, loc)

    def model_precept(self, precept, other):
        # every sense is currently passed through unmodified
        if precept.sense == "vision":
            return precept

        if precept.sense == "sound":
            return precept

        return precept

    def can_move_from(self, agent, dist=100):
        """
        return a list of positions that are possible for this agent to be
        in if it were to move [dist] spaces or less.
        """

        # positions are stored as (environment, (x, y)) pairs
        x, y = agent.position[1]
        pos = []

        # NOTE(review): distance2 returns a *squared* distance, so comparing
        # it to dist limits the radius to sqrt(dist); dist * dist may have
        # been intended -- confirm
        for xx in xrange(x - dist, x + dist):
            for yy in xrange(y - dist, y + dist):
                if distance2((xx, yy), (x, y)) <= dist:
                    pos.append((self, (xx, yy)))

        return pos
diff --git a/pygoap/goals.py b/pygoap/goals.py
new file mode 100644
index 0000000..d7b9b27
--- /dev/null
+++ b/pygoap/goals.py
@@ -0,0 +1,290 @@
+"""
+Goals in the context of a pyGOAP agent give the planner some direction when
+planning. Goals are known to the agent and are constantly monitored and
+evaluated. The agent will attempt to choose the most relevant goal for it's
+state (determined by the blackboard) and then the planner will determine a
+plan for the agent to follw that will (possibly) satisfy the chosen goal.
+
+See the modules effects.py and goals.py to see how these are used.
+
+test() should return a float from 0-1 on how successful the action would be
+if carried out with the given state of the bb.
+
+touch() should modify a bb in some meaningful way as if the action was
+finished successfully.
+"""
+
+from planning import GoalBase
+from blackboard import Tag
+import sys
+
+
+DEBUG = 0
+
class SimpleGoal(GoalBase):
    """
    Goal that uses a dict to match precepts stored on a bb.
    """

    def test(self, bb):
        # fully satisfied as soon as any stored tag equals our keyword dict
        matched = any(tag == self.kw for tag in bb.read())
        return 1.0 if matched else 0.0

    def touch(self, bb):
        # pretend the goal is met by posting a matching tag
        bb.post(Tag(**self.kw))

    def __repr__(self):
        return "<{}=\"{}\">".format(self.__class__.__name__, self.kw)
+
+
class EvalGoal(GoalBase):
    """
    uses what i think is a somewhat safe way of evaluating python statements.

    feel free to contact me if you have a better way
    """

    def test(self, bb):
        # score how close the condition "lhs <op> rhs" is to being true,
        # as a float from 0.0 to 1.0

        condition = self.args[0]

        # this only works for simple expressions
        cmpop = (">", "<", ">=", "<=", "==")

        # locate which comparison operator the expression uses
        # NOTE(review): list.index raising ValueError is swallowed by the
        # bare except; if no operator matches, i can reach 5 and the
        # 'cmpop[i]' lookup below will raise IndexError -- confirm
        i = 0
        index = 0
        expr = condition.split()
        while index == 0:
            try:
                index = expr.index(cmpop[i])
            except:
                i += 1
                if i > 5: break

        try:
            # NOTE(review): eval() expects a dict for globals, but bb is a
            # Blackboard instance -- verify this ever worked as intended
            side0 = float(eval(" ".join(expr[:index]), bb))
            side1 = float(eval(" ".join(expr[index+1:]), bb))
        except NameError:
            return 0.0

        cmpop = cmpop[i]

        # NOTE(review): only ">" and ">=" are handled; "<", "<=", and "=="
        # leave v unbound and will raise UnboundLocalError below -- confirm
        if (cmpop == ">") or (cmpop == ">="):
            if side0 == side1:
                return 1.0
            elif side0 > side1:
                v = side0 / side1
            elif side0 < side1:
                if side0 == 0:
                    return 0.0
                else:
                    v = 1 - ((side1 - side0) / side1)

        # clamp the score into [0.0, 1.0]
        if v > 1: v = 1.0
        if v < 0: v = 0.0

        return v

    def touch(self, bb):
        # execute the statement in a bare namespace, defining any missing
        # name as 0 and retrying until the statement runs cleanly
        def do_it(expr, d):

            try:
                exec expr in d
            except NameError as detail:
                # get name of missing variable
                name = detail[0].split()[1].strip('\'')
                d[name] = 0
                do_it(expr, d)

            return d

        d = {}
        d['__builtins__'] = None
        d = do_it(self.args[0], d)

        # the bb was modified
        bb.post(Tag(kw=d))

        return True
+
+
class AlwaysValidGoal(GoalBase):
    """
    Will always be valid.
    """

    def test(self, bb):
        # unconditionally satisfied, regardless of blackboard contents
        return 1.0

    def touch(self, bb, tag):
        # NOTE(review): sibling goals define touch(self, bb); the extra
        # 'tag' parameter here looks inconsistent -- confirm callers
        pass
+
+
+
class NeverValidGoal(GoalBase):
    """
    Will never be valid.
    """

    def test(self, bb):
        # never satisfied, regardless of blackboard contents
        return 0.0

    def touch(self, bb, tag):
        # NOTE(review): sibling goals define touch(self, bb); the extra
        # 'tag' parameter here looks inconsistent -- confirm callers
        pass
+
+
+
+class PositionGoal(GoalBase):
+ """
+ This validator is for finding the position of objects.
+ """
+
+ def test(self, bb):
+ """
+ search memory for last known position of the target if target is not
+ in agent's memory return 0.0.
+
+ do pathfinding and determine if the target is accessable
+ - if not return 0.0
+
+ determine the distance required to travel to the target
+ return 1.0 if the target is reachable
+ """
+
+ target = None
+ target_position = None
+ tags = bb.read("position")
+
+ if DEBUG: print "[PositionGoal] testing {}".format(self.kw)
+
+ for tag in tags:
+ target = tag['obj']
+ for k, v in self.kw.items():
+ try:
+ value = getattr(target, k)
+ except AttributeError:
+ continue
+
+ if not v == value:
+ continue
+
+ target_position = tag['position']
+ break
+ else:
+ continue
+ break
+
+ if target_position:
+ if DEBUG: print "[PositionGoal] {} {}".format(self.kw['owner'], target)
+ return 1.0
+
+ d = distance(position, target_position)
+ if d > self.dist:
+ return (float(self.dist / d)) * float(self.dist)
+ elif d <= self.dist:
+ return 1.0
+ else:
+ return 0.0
+
+
+ def touch(self, bb):
+
+ # this needs to be the same as what handle_precept() of an agent
+ # would post if it had recv'd this from the environment
+ tag = Tag(obj=self.kw['target'],
+ position=self.kw['position'])
+
+ bb.post(tag)
+
+class HasItemGoal(GoalBase):
+ """
+ returns true if item is in inventory (according to bb)
+
+ when creating instance, 'owner' must be passed as a keyword.
+ its value can be any game object that is capable of holding an object
+
+ NOTE: testing can be true to many different objects,
+ but touching requires a specific object to function
+
+ any other keyword will be evaluated against tags in the bb passed.
+ """
+
+ def __init__(self, owner, target=None, **kwargs):
+ super(HasItemGoal, self).__init__(self)
+
+ self.owner = owner
+ self.target = None
+
+ if target:
+ self.target = target
+ else:
+ try:
+ self.target = kwargs['target']
+ except KeyError:
+ pass
+
+ if (self.target == None) and (kwargs == {}):
+ raise Exception, "HasItemGoal needs more information"
+
+
+ def test(self, bb):
+ for tag in bb.read("position"):
+ if (tag['position'][0] == self.owner) and \
+ tag['obj'] == self.target:
+ return 1.0
+
+ return 0.0
+
+ def touch(self, bb):
+ # this has to be the same tag that the agent would add to its bb
+ tag = Tag(obj=self.target, position=(self.owner, 0))
+
+ if DEBUG: print "[HasItem] {} touch {}".format(self, tag)
+
+ bb.post(Tag(obj=self.target, position=(self.owner, 0)))
+
+"""
+
+code for seaching a blackboard based on keywords
+originally from hasitemgoal
+
+ target = None
+ target_position = None
+ tags = bb.read("position")
+
+ tags.reverse()
+
+ for tag in tags:
+ target = tag['obj']
+ for k, v in self.kw.items():
+ if k == 'owner':
+ continue
+
+ try:
+ value = getattr(target, k)
+ except AttributeError:
+ continue
+
+ if not v == value:
+ continue
+
+ target_position = tag['position']
+ break
+ else:
+ continue
+ break
+
+ if target_position:
+ print "[HasItem] {} {}".format(self.owner, self.target)
+ print " {} {}".format(self.owner, target_position)
+ if target_position[0] == owner:
+ print "[HasItem] {} {}".format(self.owner, self.target)
+ return 1.0
+
+"""
diff --git a/pygoap/goaltests.py b/pygoap/goaltests.py
new file mode 100644
index 0000000..c5c2eee
--- /dev/null
+++ b/pygoap/goaltests.py
@@ -0,0 +1,2 @@
+import unittest
+
diff --git a/pygoap/objectflags.py b/pygoap/objectflags.py
new file mode 100644
index 0000000..3ffcad0
--- /dev/null
+++ b/pygoap/objectflags.py
@@ -0,0 +1,20 @@
# names of the physical states a game object may be in, one per line;
# parsed into a list of strings at import time
states = """
liquid
glowing
hot
frozen
burning
normal
dead
dying
bleeding
cracked
broken
hard
soft
sticky
ooze
gas
""".strip().split('\n')
+
+
diff --git a/pygoap/planning.py b/pygoap/planning.py
new file mode 100644
index 0000000..d03c56a
--- /dev/null
+++ b/pygoap/planning.py
@@ -0,0 +1,263 @@
+"""
+Goals and prereqs are related. The Vaildator class system removes the need to
+duplicate similar functions.
+"""
+
+from blackboard import Blackboard
+from heapq import heappop, heappush, heappushpop
+import sys
+
+
+
+DEBUG = 0
+
def get_children(caller, parent, actions, dupe_parent=False):
    """
    get the children of this action

    behaves like a tree search, kinda, not really sure, actually
    return every other action on this branch that has not already been used

    NOTE(review): the dupe_parent parameter is currently unused.
    """

    def keep_node(node):
        # verify node is ok by making sure it is not duplicated in it's branch
        # NOTE(review): this compares node0.parent (not node0) against node,
        # using PlanningNode.__eq__ (delta-blackboard equality) -- confirm
        # this actually detects repeats along the branch

        keep = True

        node0 = node.parent
        while not node0.parent == None:
            if node0.parent == node:
                keep = False
                break
            node0 = node0.parent

        return keep

    children = []

    if DEBUG: print "[plan] actions: {}".format([a for a in actions])

    # ask every action for the concrete child actions it can produce given
    # the simulated blackboard at this node
    for a in actions:
        if DEBUG: print "[plan] checking {}".format(a)
        for child in a.get_actions(caller, parent.bb):
            node = PlanningNode(parent, child)

            if keep_node(node):
                #if DEBUG: print "[plan] got child {}".format(child)
                children.append(node)

    return children
+
+
def calcG(node):
    """Sum the costs of this node and every ancestor up to the root."""
    total = 0
    while node is not None:
        total += node.cost
        node = node.parent
    return total
+
+
class PlanningNode(object):
    """
    each node has a copy of a bb (self.bb) in order to simulate a plan.

    records the action taken, the accumulated cost (g) and a heuristic
    estimate (h) for A*-style ordering in the planner.
    """

    def __init__(self, parent, action, bb=None):
        self.parent = parent
        self.action = action
        self.bb = Blackboard()
        # delta holds only what this action changed; used for node equality
        self.delta = Blackboard()
        #self.cost = action.calc_cost()
        self.cost = 1
        self.g = calcG(self)
        self.h = 1

        # inherit the simulated state from the parent, or from the seed bb
        # when this is the root node
        if not parent == None:
            self.bb.update(parent.bb)

        elif not bb == None:
            self.bb.update(bb)

        # apply this action's simulated effects
        action.touch(self.delta)
        self.bb.update(self.delta)

    def __eq__(self, other):
        if isinstance(other, PlanningNode):
            #if DEBUG: print "[cmp] {} {}".format(self.delta.memory, other.delta.memory)
            return self.delta == other.delta
        else:
            return False

    def __repr__(self):
        # BUG FIX: both format strings were empty, which made repr() raise
        # TypeError ("not all arguments converted") on every call
        try:
            return "<PlanningNode: %s, cost: %s, parent: %s>" % \
                (self.action.__name__,
                self.cost,
                self.parent.action.__class__.__name__)

        except AttributeError:
            return "<PlanningNode: %s, cost: %s>" % \
                (self.action.__class__.__name__,
                self.cost)
+
+class GoalBase(object):
+ """
+ Goals:
+ can be satisfied.
+ can be valid
+
+ This is meant to be a superclass along with a validator class to create
+ goals and action prereqs at runtime. When creating designing subclasses,
+ sibling superclass should be a validator.
+
+ Goals, ActionPrereqs and ActionEffects are now that same class. They share
+ so much functionality and are so logically similar that they have been made
+ into one class.
+
+ The only difference is how they are used. If a goal is used by the planner
+ then that will be the final point of the plan. if it is used in
+ conjunction with an action, then it will function as a prereq.
+ """
+
+ def __init__(self, *args, **kwargs):
+ try:
+ self.condition = args[0]
+ except IndexError:
+ self.condition = None
+
+ self.value = 1.0
+ self.args = args
+ self.kw = kwargs
+
+ self.satisfied = self.test
+
+ def touch(self, bb):
+ if DEBUG: print "[debug] goal {} has no touch method".format(self)
+
+ def test(self, bb):
+ if DEBUG: print "[debug] goal {} has no test method".format(self)
+
+ def get_relevancy(self, bb):
+ """
+ will return the "relevancy" value for this goal/prereq.
+
+ as a general rule, the return value here should never equal
+ what is returned from test()
+ """
+
+ if not self.test(bb): return self.value
+ return 0.0
+
+
+ def self_test(self):
+ """
+ make sure the goal is sane
+ """
+
+ bb = Blackboard()
+ self.touch(bb)
+ assert self.test(bb) == True
+
+
+ def __repr__(self):
+ return "<{}>".format(self.__class__.__name__)
+
+
class InstancedAction(object):
    """
    This action is suitable as a generic 'idling' action.
    """

    # hook for the factory that produced this action; unused by the base
    builder = None

    def __init__(self):
        # lifecycle state; managed externally once the action is started
        self.state = None

    def touch(self, bb):
        if DEBUG: print "[debug] action {} has no touch method".format(self)

    def test(self, bb):
        if DEBUG: print "[debug] action {} has no test method".format(self)

    def __repr__(self):
        return self.__class__.__name__
+
+
+def plan(caller, actions, start_action, start_blackboard, goal):
+ """
+ differs slightly from normal astar in that:
+ there are no connections between nodes
+ the state of the "map" changes as the nodes are traversed
+ there is no closed list (behaves like a tree search)
+ hueristics are not available
+
+ this is not implied to be correct or effecient
+ """
+
+ # the pushback is used to limit node access in the heap
+ pushback = None
+ success = False
+
+ keyNode = PlanningNode(None, start_action, start_blackboard)
+
+ openlist = [(0, keyNode)]
+
+ # the root can return a copy of itself, the others cannot
+ # this allows the planner to produce plans that duplicate actions
+ # this feature is currently on a hiatus
+ return_parent = 0
+
+ if DEBUG: print "[plan] solve {} planning {}".format(goal, start_action)
+
+ while openlist or pushback:
+
+ # get the best node.
+ if pushback == None:
+ keyNode = heappop(openlist)[1]
+ else:
+ keyNode = heappushpop(
+ openlist, (pushback.g + pushback.h, pushback))[1]
+
+ pushback = None
+
+ if DEBUG: print "[plan] testing action {}".format(keyNode.action)
+ if DEBUG: print "[plan] against bb {}".format(keyNode.bb.read())
+
+ # if our goal is satisfied, then stop
+ #if (goal.satisfied(keyNode.bb)) and (return_parent == 0):
+ if goal.test(keyNode.bb):
+ success = True
+ if DEBUG: print "[plan] successful {}".format(keyNode.action)
+ break
+
+ for child in get_children(caller, keyNode, actions, return_parent):
+ if child in openlist:
+ possG = keyNode.g + child.cost
+ if (possG < child.g):
+ child.parent = keyNode
+ child.g = calcG(child)
+ # TODO: update the h score
+ else:
+ # add node to our openlist, using pushpack if needed
+ if pushback == None:
+ heappush(openlist, (child.g + child.h, child))
+ else:
+ heappush(openlist, (pushback.g + pushback.h, pushback))
+ pushpack = child
+
+ return_parent = 0
+
+ if success:
+ path0 = [keyNode.action]
+ path1 = [keyNode]
+ while not keyNode.parent == None:
+ keyNode = keyNode.parent
+ path0.append(keyNode.action)
+ path1.append(keyNode)
+
+ return True, path0
+
+ else:
+ return False, []
+
+
diff --git a/pygoap/tiledenvironment.py b/pygoap/tiledenvironment.py
new file mode 100644
index 0000000..bbe6a23
--- /dev/null
+++ b/pygoap/tiledenvironment.py
@@ -0,0 +1,42 @@
+from environment2d import XYEnvironment
+import tmxloader
+from pygame import Surface
+
+
+
class TiledEnvironment(XYEnvironment):
    """
    Environment that can use Tiled Maps
    """

    def __init__(self, filename):
        # load the map before the 2d environment initializes
        self.filename = filename
        self.tiledmap = tmxloader.load_pygame(self.filename)

        super(TiledEnvironment, self).__init__()

    def render(self, surface):
        # not going for effeciency here

        # draw every tile of every layer, bottom layer first
        for l in xrange(0, len(self.tiledmap.layers)):
            for y in xrange(0, self.tiledmap.height):
                for x in xrange(0, self.tiledmap.width):
                    tile = self.tiledmap.get_tile_image(x, y, l)
                    xx = x * self.tiledmap.tilewidth
                    yy = y * self.tiledmap.tileheight
                    # gid 0 means "no tile at this location"
                    if not tile == 0:
                        surface.blit(tile, (xx, yy))

        # draw each thing as a solid red tile-sized square
        for t in self.things:
            # positions are (environment, (x, y)) pairs; grid -> pixels
            x, y = t.position[1]
            x *= self.tiledmap.tilewidth
            y *= self.tiledmap.tileheight

            s = Surface((self.tiledmap.tilewidth, self.tiledmap.tileheight))
            s.fill((128,0,0))

            surface.blit(s, (x, y))

    def __repr__(self):
        return "T-Env"
+
diff --git a/pygoap/tmxloader.py b/pygoap/tmxloader.py
new file mode 100644
index 0000000..f575bd4
--- /dev/null
+++ b/pygoap/tmxloader.py
@@ -0,0 +1,625 @@
+"""
+Map loader for TMX Files
+bitcraft (leif.theden at gmail.com)
+v.7 - for python 2.7
+
+If you have any problems, please contact me via email.
+Tested with Tiled 0.7.1 for Mac.
+
+======================================================================
+
+This map loader can be used to load maps created in the Tiled map
+editor. It provides a simple way to get tiles and associated metadata
+so that you can draw a map onto the screen.
+
+This is not a rendering engine. It will load the data that is
+necessary to render a map onto the screen. All tiles will be loaded
+into in memory and available to blit onto the screen.
+
+
+Design Goals:
+ Simple api
+ Memory efficient and fast
+ Quick access to tiles, attributes, and properties
+
+Non-Goals:
+ Rendering
+
+Works:
+ Image loading with pygame
+ Map loading with all required types
+ Properties for all types: maps, layers, objects, tiles
+ Automatic flipping of tiles
+ Supports csv, gzip, zlib and uncompressed TMX
+
+Todo:
+ Pygame: test colorkey transparency
+
+Optimized for maps that do not make heavy use of tile
+properties. If I find that it is used a lot then I can rework
+it for better performance.
+
+======================================================================
+
+Basic usage sample:
+
+ >>> import tmxloader
+ >>> tiledmap = tmxloader.load_pygame("map.tmx")
+
+
+When you want to draw tiles, you simply call "get_tile_image":
+
+ >>> image = tiledmap.get_tile_image(x, y, layer)
+ >>> screen.blit(position, image)
+
+
+Layers, objectgroups, tilesets, and maps all have a simple way to access
+metadata that was set inside tiled: they all become class attributes.
+
+ >>> print layer.tilewidth
+ 32
+ >>> print layer.weather
+ 'sunny'
+
+
+Tiles are the exception here, and must be accessed through "getTileProperties"
+and are regular Python dictionaries:
+
+ >>> tile = tiledmap.getTileProperties(x, y, layer)
+ >>> tile["name"]
+ 'CobbleStone'
+
+"""
+
+from itertools import chain
+
+
+# internal flags
+FLIP_X = 1
+FLIP_Y = 2
+
+
+# Tiled gid flags
+GID_FLIP_X = 1<<31
+GID_FLIP_Y = 1<<30
+
+
class TiledElement(object):
    # common base for map elements; the loader fills in attributes from the
    # TMX tag attributes and "properties" elements
    pass
+
+class TiledMap(TiledElement):
+ """
+ not really useful unless "loaded" ie: don't instance directly.
+ see the pygame loader for inspiration
+ """
+
+ def __init__(self):
+ TiledElement.__init__(self)
+ self.layers = [] # list of all layer types (tile layers + object layers)
+ self.tilesets = [] # list of TiledTileset objects
+ self.tilelayers = [] # list of TiledLayer objects
+ self.objectgroups = [] # list of TiledObjectGroup objects
+ self.tile_properties = {} # dict of tiles that have additional metadata (properties)
+ self.filename = None
+
+ # this is a work around to tiled's strange way of storing gid's
+ self.images = [0]
+
+ # defaults from the TMX specification
+ self.version = 0.0
+ self.orientation = None
+ self.width = 0
+ self.height = 0
+ self.tilewidth = 0
+ self.tileheight = 0
+
+ def get_tile_image(self, x, y, layer):
+ """
+ return the tile image for this location
+ x and y must be integers and are in tile coordinates, not pixel
+
+ return value will be 0 if there is no tile with that location.
+ """
+
+ try:
+ gid = self.tilelayers[layer].data[y][x]
+ except (IndexError, ValueError):
+ msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
+ raise Exception, msg
+
+ else:
+ try:
+ return self.images[gid]
+ except (IndexError, ValueError):
+ msg = "Coords: ({0},{1}) in layer {2} has invaid GID: {3}/{4}.".format(x, y, layer, gid, len(self.images))
+ raise Exception, msg
+
+ def getTileGID(self, x, y, layer):
+ """
+ return GID of a tile in this location
+ x and y must be integers and are in tile coordinates, not pixel
+ """
+
+ try:
+ return self.tilelayers[layer].data[y][x]
+ except (IndexError, ValueError):
+ msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
+ raise Exception, msg
+
+ def getDrawOrder(self):
+ """
+ return a list of objects in the order that they should be drawn
+ this will also exclude any layers that are not set to visible
+
+ may be useful if you have objects and want to control rendering
+ from tiled
+ """
+
+ raise NotImplementedError
+
+ def getTileImages(self, r, layer):
+ """
+ return a group of tiles in an area
+ expects a pygame rect or rect-like list/tuple
+
+ usefull if you don't want to repeatedly call get_tile_image
+ probably not the most effecient way of doing this, but oh well.
+ """
+
+ raise NotImplementedError
+
+ def getObjects(self):
+ """
+ Return iterator all of the objects associated with this map
+ """
+
+ return chain(*[ i.objects for i in self.objectgroups ])
+
+ def getTileProperties(self, x, y, layer):
+ """
+ return the properties for the tile, if any
+ x and y must be integers and are in tile coordinates, not pixel
+
+ returns a dict of there are properties, otherwise will be None
+ """
+
+ try:
+ gid = self.tilelayers[layer].data[y][x]
+ except (IndexError, ValueError):
+ msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
+ raise Exception, msg
+
+ else:
+ try:
+ return self.tile_properties[gid]
+ except (IndexError, ValueError):
+ msg = "Coords: ({0},{1}) in layer {2} has invaid GID: {3}/{4}.".format(x, y, layer, gid, len(self.images))
+ raise Exception, msg
+
+ def getTilePropertiesByGID(self, gid):
+ try:
+ return self.tile_properties[gid]
+ except KeyError:
+ return None
+
+# the following classes get their attributes filled in with the loader
+
class TiledTileset(TiledElement):
    """A TMX <tileset> element; the loader fills in the attributes."""
    def __init__(self):
        TiledElement.__init__(self)

        # defaults from the specification
        self.firstgid = 0
        self.lastgid = 0
        self.name = None
        self.tilewidth = 0
        self.tileheight = 0
        self.spacing = 0
        self.margin = 0
+
class TiledLayer(TiledElement):
    """A TMX tile <layer>; .data is indexed as data[y][x] to get a gid."""
    def __init__(self):
        TiledElement.__init__(self)
        # 2d gid data; populated by the loader
        self.data = None

        # defaults from the specification
        self.name = None
        self.opacity = 1.0
        self.visible = 1
+
class TiledObjectGroup(TiledElement):
    """A TMX <objectgroup>; holds its TiledObject instances in .objects."""
    def __init__(self):
        TiledElement.__init__(self)
        self.objects = []

        # defaults from the specification
        self.name = None
+
class TiledObject(TiledElement):
    # NOTE(review): __slots__ has no effect here because the TiledElement
    # base class does not define __slots__, so instances still carry a
    # per-instance __dict__ -- confirm whether this was intended
    __slots__ = ['name', 'type', 'x', 'y', 'width', 'height', 'gid']

    def __init__(self):
        TiledElement.__init__(self)

        # defaults from the specification
        self.name = None
        self.type = None
        self.x = 0
        self.y = 0
        self.width = 0
        self.height = 0
        self.gid = 0
+
+
+def load_tmx(filename):
+ """
+ Utility function to parse a Tiled TMX and return a usable object.
+ Images will not be loaded, so probably not useful to call this directly
+
+ See the load_pygame func for an idea of what to do if you want to extend
+ this further.
+ """
+
+ from xml.dom.minidom import parse
+ from itertools import tee, islice, izip, chain, imap
+ from collections import defaultdict
+ from struct import unpack
+ import array, os
+
+ # used to change the unicode string returned from minidom to
+ # proper python variable types.
+ types = {
+ "version": float,
+ "orientation": str,
+ "width": int,
+ "height": int,
+ "tilewidth": int,
+ "tileheight": int,
+ "firstgid": int,
+ "source": str,
+ "name": str,
+ "spacing": int,
+ "margin": int,
+ "source": str,
+ "trans": str,
+ "id": int,
+ "opacity": float,
+ "visible": bool,
+ "encoding": str,
+ "compression": str,
+ "gid": int,
+ "type": str,
+ "x": int,
+ "y": int,
+ "value": str,
+ }
+
+ def pairwise(iterable):
+ # return a list as a sequence of pairs
+ a, b = tee(iterable)
+ next(b, None)
+ return izip(a, b)
+
+ def group(l, n):
+ # return a list as a sequence of n tuples
+ return izip(*[islice(l, i, None, n) for i in xrange(n)])
+
+ def parse_properties(node):
+ """
+ parse a node and return a dict that represents a tiled "property"
+ """
+
+ d = {}
+
+ for child in node.childNodes:
+ if child.nodeName == "properties":
+ for subnode in child.getElementsByTagName("property"):
+ # the "properties" from tiled's tmx have an annoying
+ # quality that "name" and "value" is included as part of it.
+ # so we mangle it to get that stuff out.
+ d.update(dict(pairwise([ str(i.value) for i in subnode.attributes.values() ])))
+
+ return d
+
+ def get_properties(node):
+ """
+ parses a node and returns a dict that contains the data from the node's
+ attributes and any data from "property" elements as well.
+ """
+
+ d = {}
+
+ # get tag attributes
+ d.update(get_attributes(node))
+
+ # get vlues of the properties element, if any
+ d.update(parse_properties(node))
+
+ return d
+
+ def set_properties(obj, node):
+ """
+ read the xml attributes and tiled "properties" from a xml node and fill in
+ the values into an object's dictionary
+ """
+
+ [ setattr(obj, k, v) for k,v in get_properties(node).items() ]
+
def get_attributes(node):
    """
    get the attributes from a node and fix them to the correct type

    Returns a defaultdict, so attributes that were never present read
    back as None.  BUGFIX: an attribute with no registered converter in
    the "types" table is kept as a plain string instead of raising
    KeyError.
    """

    d = defaultdict(lambda: None)

    for k, v in node.attributes.items():
        # minidom hands back unicode; normalise the key first
        k = str(k)
        d[k] = types.get(k, str)(v)

    return d
+
def decode_gid(raw_gid):
    """
    Split a raw gid into (gid, flags).

    gid's are encoded with extra information; as of tiled 0.7.0 the
    high bits say whether the tile should be flipped when rendered.
    """

    render_flags = 0
    if raw_gid & GID_FLIP_X == GID_FLIP_X:
        render_flags += FLIP_X
    if raw_gid & GID_FLIP_Y == GID_FLIP_Y:
        render_flags += FLIP_Y

    # mask the flip bits off to recover the real gid
    real_gid = raw_gid & ~(GID_FLIP_X | GID_FLIP_Y)

    return real_gid, render_flags
+
+
def parse_map(node):
    """
    parse a map node from a tiled tmx file
    return a tiledmap

    BUGFIX: the body previously referenced "map_node" and "dom" from the
    enclosing scope and ignored the "node" argument it was given; it now
    parses the node passed in.  Since the <map> element is the document
    root of a TMX file, searching it finds the same tileset/layer/
    objectgroup elements the old document-wide search did.
    """

    tiledmap = TiledMap()
    tiledmap.filename = filename    # filename comes from the enclosing loader
    set_properties(tiledmap, node)

    for subnode in node.getElementsByTagName("tileset"):
        t, tiles = parse_tileset(subnode)
        tiledmap.tilesets.append(t)
        tiledmap.tile_properties.update(tiles)

    for subnode in node.getElementsByTagName("layer"):
        l = parse_layer(tiledmap.tilesets, subnode)
        tiledmap.tilelayers.append(l)
        tiledmap.layers.append(l)

    for subnode in node.getElementsByTagName("objectgroup"):
        o = parse_objectgroup(subnode)
        tiledmap.objectgroups.append(o)
        tiledmap.layers.append(o)

    return tiledmap
+
+
+ def parse_tileset(node, firstgid=None):
+ """
+ parse a tileset element and return a tileset object and properties for tiles as a dict
+ """
+
+ tileset = TiledTileset()
+ set_properties(tileset, node)
+ tiles = {}
+
+ if firstgid != None:
+ tileset.firstgid = firstgid
+
+ # since tile objects probably don't have a lot of metadata,
+ # we store it seperately from the class itself
+ for child in node.childNodes:
+ if child.nodeName == "tile":
+ p = get_properties(child)
+ gid = p["id"] + tileset.firstgid
+ del p["id"]
+ tiles[gid] = p
+
+ # check for tiled "external tilesets"
+ if hasattr(tileset, "source"):
+ if tileset.source[-4:].lower() == ".tsx":
+ try:
+ # we need to mangle the path some because tiled stores relative paths
+ path = os.path.join(os.path.dirname(filename), tileset.source)
+ tsx = parse(path)
+ except IOError:
+ raise IOError, "Cannot load external tileset: " + path
+
+ tileset_node = tsx.getElementsByTagName("tileset")[0]
+ tileset, tiles = parse_tileset(tileset_node, tileset.firstgid)
+ else:
+ raise Exception, "Found external tileset, but cannot handle type: " + tileset.source
+
+ # if we have an "image" tag, process it here
+ try:
+ image_node = node.getElementsByTagName("image")[0]
+ except IndexError:
+ print "cannot find associated image"
+ else:
+ attr = get_attributes(image_node)
+ tileset.source = attr["source"]
+ tileset.trans = attr["trans"]
+
+ # calculate the number of tiles in this tileset
+ x, r = divmod(attr["width"], tileset.tilewidth)
+ y, r = divmod(attr["height"], tileset.tileheight)
+
+ tileset.lastgid = tileset.firstgid + x + y
+
+ return tileset, tiles
+
+
+ def parse_layer(tilesets, node):
+ """
+ parse a layer element and return a layer object
+
+ tilesets is required since we need to mangle gid's here
+ """
+
+ layer = TiledLayer()
+ layer.data = []
+ layer.flipped_tiles = []
+ set_properties(layer, node)
+
+ data = None
+ next_gid = None
+
+ data_node = node.getElementsByTagName("data")[0]
+ attr = get_attributes(data_node)
+
+ if attr["encoding"] == "base64":
+ from base64 import decodestring
+ data = decodestring(data_node.lastChild.nodeValue)
+
+ elif attr["encoding"] == "csv":
+ next_gid = imap(int, "".join([line.strip() for line in data_node.lastChild.nodeValue]).split(","))
+
+ elif not attr["encoding"] == None:
+ raise Exception, "TMX encoding type: " + str(attr["encoding"]) + " is not supported."
+
+ if attr["compression"] == "gzip":
+ from StringIO import StringIO
+ import gzip
+ with gzip.GzipFile(fileobj=StringIO(data)) as fh:
+ data = fh.read()
+
+ if attr["compression"] == "zlib":
+ try:
+ import zlib
+ except:
+ raise Exception, "Cannot import zlib. Make sure modules and libraries are installed."
+
+ data = zlib.decompress(data)
+
+ elif not attr["compression"] == None:
+ raise Exception, "TMX compression type: " + str(attr["compression"]) + " is not supported."
+
+ # if data is None, then it was not decoded or decompressed, so
+ # we assume here that it is going to be a bunch of tile elements
+ if attr["encoding"] == next_gid == None:
+ def get_children(parent):
+ for child in parent.getElementsByTagName("tile"):
+ yield int(child.getAttribute("gid"))
+
+ next_gid = get_children(data_node)
+
+ elif not data == None:
+ # cast the data as a list of 32-bit integers
+ next_gid = imap(lambda i: unpack("