From b98972f0f44f419bf61cca9089d1062081074586 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 15 Jul 2014 15:07:22 -0400 Subject: [PATCH 01/33] Update integrationtestlib.py Updated to make a log file in a specified directory --- integrationtestlib.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 24fd20e..3a7150e 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -24,6 +24,7 @@ import time import socket import sys +import os # the people to notify on failure/if anything goes wrong notify_list = ["jcappos@poly.edu", "monzum@u.washington.edu", "gppressi@gmail.com", "leon.wlaw@gmail.com", "hermanchchen@gmail.com"] @@ -46,7 +47,15 @@ def log(msg): None. """ - print time.ctime() + " : " + msg + if raw_input("Would you like a log file? (y/n)")=='y': + save_path = raw_input("Where would you like to save the log file to?") #format for this is'/home/user/...' + name_of_file = "log" + completeName = os.path.join(save_path, name_of_file+".txt") + logFile = open(completeName, "a") + logFile.write(time.ctime() + " : " + msg) + logFile.close() +print time.ctime() + " : " + msg + return @@ -82,7 +91,7 @@ def notify(text, subject): for emailaddr in notify_list: log("notifying " + emailaddr) send_gmail.send_gmail(emailaddr, subject, text, "") - + return From e5b25d9ce3fe58fa490e38550772a1fd36515bc9 Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 31 Jul 2014 01:45:34 -0400 Subject: [PATCH 02/33] (Work in Progress) --- selexor_active.py | 153 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 selexor_active.py diff --git a/selexor_active.py b/selexor_active.py new file mode 100644 index 0000000..9478036 --- /dev/null +++ b/selexor_active.py @@ -0,0 +1,153 @@ +""" + + selexor_active.py + + + Attempts to acquire and release vessels via selexor. + + + Modify the following global var params to have this script functional: + - notify_list, a list of strings with emails denoting who will be + emailed when something goes wrong + + This script takes no arguments. A typical use of this script is to + have it run periodically using something like the following crontab line: + 7 * * * * /usr/bin/python /home/seattle/centralizedputget.py > /home/seattle/cron_log.centralizedputget +""" + +import urllib +import urllib2 +import send_gmail +import integrationtestlib + +import subprocess + + +import repyhelper +repyhelper.translate_and_import('serialize.repy') + + +username = 'selexor_monitor' +apikey = '1X3YFBLPTKVSI8DQHWJZ0NR92645ECUA' + +userdata = {username: {'apikey': apikey}} + +ERROR_EMAIL_SUBJECT = "Selexor monitoring test failure!" +SELEXOR_PAGE = "https://selexor.poly.edu:8888/" + + +def retrieve_url(url, data=None): + args = ["curl", url, "--insecure"] + if data: + args += ['--data', '"'+data+'"'] + response_text = urllib2.urlopen(SELEXOR_PAGE) + # Return the output of the URL download + return response_text.read() + +def test_selexor_alive(): + response_text = retrieve_url(SELEXOR_PAGE) + if not response_text: + raise Exception("Server returned no data!") + + if "404 Not Found" in response_text: + raise Exception("Server reported 404 Not Found!") + + +def test_selexor_acquire(): + """ + + Tests to make sure that users can acquire vessels. + + + None + + + Acquires a single vessel on the user's behalf. + + + None + + + The vessel dictionary of the acquired vessel. This can be used to + is how selexor references each vessel. 
+ """ + print "mark" + get_one_vessel = { + '0': { + 'id': 0.0, 'allocate': '1', 'rules': { + 'port': {'port': '63139'} + }}} + requestinfo = {'request': { + 'userdata': userdata, + 'groups': get_one_vessel + }} + response_text = urllib2.urlopen(SELEXOR_PAGE) + html = response_text.read() + print type(html) + query_response = serialize_deserializedata(html) + if query_response['data']['status'] != 'working': + raise Exception("Failed to submit request! Response: " + str(query_response)) + print "mark1.6" + query = {'query': { + 'userdata': userdata + }} + print "mark1.7" + while query_response['data']['status'] == 'working': + # Give selexor some time to do its processing... + sleep(10) + response_text = retrieve_url(SELEXOR_PAGE, + data=serialize_serializedata(query)).read() + query_response = serialize_deserializedata(response_text) + print "mark1.8" + if query_response['data']['status'] != 'complete': + raise Exception("Acquiring one vessel failed! response: " + str(query_response)) + + return query_response['data']['groups']['0'] + + +def test_selexor_release(vessel_dict): + """ + + Tests to make sure that users can release vessels. + + + vessel_dict: A dictionary representing a single vessel to release. + + + Releases the vessel specified. + + + None + + + None + """ + userdata = {'leonwlaw': {'apikey': 'AHXRZ2D4FMU0SJPGKTNCLQ8VIE691WB5'}} + requestinfo = {'request': { + 'userdata': userdata, + 'vessels': [vessel_dict] + }} + + response_text = urllib2.urlopen(SELEXOR_PAGE, + data=serialize_serializedata(requestinfo)).read() + + +def main(): + send_gmail.init_gmail() + + try: + if username is None or apikey is None: + raise Exception("Username and/or API key is not set!") + test_selexor_alive() + # Not working, might be that selexor's not performing HTTP requests + # correctly + acquired_vessel = test_selexor_acquire() + # test_selexor_release(acquired_vessel) + except: + print("integrationtestlib.handle_exception('Exception occurred when contacting selexor', ERROR_EMAIL_SUBJECT)") + exit() + + +if __name__ == "__main__": + main() + From e06d092da4c1d1298705f0a1f66c2721fbff16f2 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 5 Aug 2014 15:12:23 -0400 Subject: [PATCH 03/33] WIP - Currently being debugged --- selexor_active.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/selexor_active.py b/selexor_active.py index 9478036..d41ccfc 100644 --- a/selexor_active.py +++ b/selexor_active.py @@ -16,10 +16,9 @@ """ import urllib -import urllib2 import send_gmail import integrationtestlib - +import urllib2 import subprocess @@ -37,12 +36,20 @@ def retrieve_url(url, data=None): + print "retrieve_url" args = ["curl", url, "--insecure"] if data: args += ['--data', '"'+data+'"'] - response_text = urllib2.urlopen(SELEXOR_PAGE) + print "download_process" + download_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + print "downloadwait" + download_process.wait() + #print "communicate download: " + str(download_process.communicate()[0]) + #print download_process.communicate()[0] # Return the output of the URL download - return response_text.read() + #print "communicate download: " + download_process.communicate()[0] + return download_process.communicate()[0] + def test_selexor_alive(): response_text = retrieve_url(SELEXOR_PAGE) @@ -71,7 +78,6 @@ def test_selexor_acquire(): The vessel dictionary of the acquired vessel. This can be used to is how selexor references each vessel. 
""" - print "mark" get_one_vessel = { '0': { 'id': 0.0, 'allocate': '1', 'rules': { @@ -81,24 +87,25 @@ def test_selexor_acquire(): 'userdata': userdata, 'groups': get_one_vessel }} - response_text = urllib2.urlopen(SELEXOR_PAGE) - html = response_text.read() - print type(html) - query_response = serialize_deserializedata(html) + print "Mark0" + print userdata + response_text = retrieve_url(SELEXOR_PAGE, + data=serialize_serializedata(requestinfo)) + print "Mark1" + query_response = serialize_deserializedata(response_text) if query_response['data']['status'] != 'working': raise Exception("Failed to submit request! Response: " + str(query_response)) - print "mark1.6" + print "Mark2" query = {'query': { 'userdata': userdata }} - print "mark1.7" while query_response['data']['status'] == 'working': # Give selexor some time to do its processing... sleep(10) response_text = retrieve_url(SELEXOR_PAGE, data=serialize_serializedata(query)).read() query_response = serialize_deserializedata(response_text) - print "mark1.8" + if query_response['data']['status'] != 'complete': raise Exception("Acquiring one vessel failed! response: " + str(query_response)) @@ -128,7 +135,7 @@ def test_selexor_release(vessel_dict): 'vessels': [vessel_dict] }} - response_text = urllib2.urlopen(SELEXOR_PAGE, + response_text = urllib.urlopen(SELEXOR_PAGE, data=serialize_serializedata(requestinfo)).read() @@ -138,13 +145,14 @@ def main(): try: if username is None or apikey is None: raise Exception("Username and/or API key is not set!") - test_selexor_alive() + + #test_selexor_alive() # Not working, might be that selexor's not performing HTTP requests # correctly acquired_vessel = test_selexor_acquire() # test_selexor_release(acquired_vessel) except: - print("integrationtestlib.handle_exception('Exception occurred when contacting selexor', ERROR_EMAIL_SUBJECT)") + print("integrationtestlib.handle_exception(Exception occurred when contacting selexor, ERROR_EMAIL_SUBJECT)") exit() From 79efd33e6502a757ccdb6800b2c09c33edfa924e Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 7 Aug 2014 00:34:40 -0400 Subject: [PATCH 04/33] New integrationtestlib that utilizes a file for the notify list of emails --- integrationtestlib.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 3a7150e..4ccf498 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -24,11 +24,12 @@ import time import socket import sys -import os # the people to notify on failure/if anything goes wrong -notify_list = ["jcappos@poly.edu", "monzum@u.washington.edu", "gppressi@gmail.com", "leon.wlaw@gmail.com", "hermanchchen@gmail.com"] - +with open("notify_list", "rb") as fp: + notify_list = [] + for line in fp: + notify_list.append(line[:-1]) if line[-1] == "\n" else lines.append(line) def log(msg): """ @@ -47,15 +48,7 @@ def log(msg): None. """ - if raw_input("Would you like a log file? (y/n)")=='y': - save_path = raw_input("Where would you like to save the log file to?") #format for this is'/home/user/...' 
- name_of_file = "log" - completeName = os.path.join(save_path, name_of_file+".txt") - logFile = open(completeName, "a") - logFile.write(time.ctime() + " : " + msg) - logFile.close() -print time.ctime() + " : " + msg - + print time.ctime() + " : " + msg return @@ -91,7 +84,7 @@ def notify(text, subject): for emailaddr in notify_list: log("notifying " + emailaddr) send_gmail.send_gmail(emailaddr, subject, text, "") - + return From f46ed880f40c28e6c1d426ff416e3cc23e8c9997 Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 7 Aug 2014 00:38:36 -0400 Subject: [PATCH 05/33] Update integrationtestlib.py --- integrationtestlib.py | 1 + 1 file changed, 1 insertion(+) diff --git a/integrationtestlib.py b/integrationtestlib.py index 4ccf498..7312d01 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -24,6 +24,7 @@ import time import socket import sys +import os # the people to notify on failure/if anything goes wrong with open("notify_list", "rb") as fp: From fcbd07bbde559965c9b547ef9dffb503c4d5926f Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 7 Aug 2014 00:46:07 -0400 Subject: [PATCH 06/33] Delete selexor_active.py --- selexor_active.py | 161 ---------------------------------------------- 1 file changed, 161 deletions(-) delete mode 100644 selexor_active.py diff --git a/selexor_active.py b/selexor_active.py deleted file mode 100644 index d41ccfc..0000000 --- a/selexor_active.py +++ /dev/null @@ -1,161 +0,0 @@ -""" - - selexor_active.py - - - Attempts to acquire and release vessels via selexor. - - - Modify the following global var params to have this script functional: - - notify_list, a list of strings with emails denoting who will be - emailed when something goes wrong - - This script takes no arguments. A typical use of this script is to - have it run periodically using something like the following crontab line: - 7 * * * * /usr/bin/python /home/seattle/centralizedputget.py > /home/seattle/cron_log.centralizedputget -""" - -import urllib -import send_gmail -import integrationtestlib -import urllib2 -import subprocess - - -import repyhelper -repyhelper.translate_and_import('serialize.repy') - - -username = 'selexor_monitor' -apikey = '1X3YFBLPTKVSI8DQHWJZ0NR92645ECUA' - -userdata = {username: {'apikey': apikey}} - -ERROR_EMAIL_SUBJECT = "Selexor monitoring test failure!" -SELEXOR_PAGE = "https://selexor.poly.edu:8888/" - - -def retrieve_url(url, data=None): - print "retrieve_url" - args = ["curl", url, "--insecure"] - if data: - args += ['--data', '"'+data+'"'] - print "download_process" - download_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - print "downloadwait" - download_process.wait() - #print "communicate download: " + str(download_process.communicate()[0]) - #print download_process.communicate()[0] - # Return the output of the URL download - #print "communicate download: " + download_process.communicate()[0] - return download_process.communicate()[0] - - -def test_selexor_alive(): - response_text = retrieve_url(SELEXOR_PAGE) - if not response_text: - raise Exception("Server returned no data!") - - if "404 Not Found" in response_text: - raise Exception("Server reported 404 Not Found!") - - -def test_selexor_acquire(): - """ - - Tests to make sure that users can acquire vessels. - - - None - - - Acquires a single vessel on the user's behalf. - - - None - - - The vessel dictionary of the acquired vessel. This can be used to - is how selexor references each vessel. 
- """ - get_one_vessel = { - '0': { - 'id': 0.0, 'allocate': '1', 'rules': { - 'port': {'port': '63139'} - }}} - requestinfo = {'request': { - 'userdata': userdata, - 'groups': get_one_vessel - }} - print "Mark0" - print userdata - response_text = retrieve_url(SELEXOR_PAGE, - data=serialize_serializedata(requestinfo)) - print "Mark1" - query_response = serialize_deserializedata(response_text) - if query_response['data']['status'] != 'working': - raise Exception("Failed to submit request! Response: " + str(query_response)) - print "Mark2" - query = {'query': { - 'userdata': userdata - }} - while query_response['data']['status'] == 'working': - # Give selexor some time to do its processing... - sleep(10) - response_text = retrieve_url(SELEXOR_PAGE, - data=serialize_serializedata(query)).read() - query_response = serialize_deserializedata(response_text) - - if query_response['data']['status'] != 'complete': - raise Exception("Acquiring one vessel failed! response: " + str(query_response)) - - return query_response['data']['groups']['0'] - - -def test_selexor_release(vessel_dict): - """ - - Tests to make sure that users can release vessels. - - - vessel_dict: A dictionary representing a single vessel to release. - - - Releases the vessel specified. - - - None - - - None - """ - userdata = {'leonwlaw': {'apikey': 'AHXRZ2D4FMU0SJPGKTNCLQ8VIE691WB5'}} - requestinfo = {'request': { - 'userdata': userdata, - 'vessels': [vessel_dict] - }} - - response_text = urllib.urlopen(SELEXOR_PAGE, - data=serialize_serializedata(requestinfo)).read() - - -def main(): - send_gmail.init_gmail() - - try: - if username is None or apikey is None: - raise Exception("Username and/or API key is not set!") - - #test_selexor_alive() - # Not working, might be that selexor's not performing HTTP requests - # correctly - acquired_vessel = test_selexor_acquire() - # test_selexor_release(acquired_vessel) - except: - print("integrationtestlib.handle_exception(Exception occurred when contacting selexor, ERROR_EMAIL_SUBJECT)") - exit() - - -if __name__ == "__main__": - main() - From edea25ef4be9b255c3b4a7ef7fa908ce307cbbac Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 7 Aug 2014 10:13:51 -0400 Subject: [PATCH 07/33] Update integrationtestlib.py --- integrationtestlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 7312d01..bef0f10 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -30,7 +30,7 @@ with open("notify_list", "rb") as fp: notify_list = [] for line in fp: - notify_list.append(line[:-1]) if line[-1] == "\n" else lines.append(line) + notify_list.append(line[:-1]) if line[-1] == "\n" else notify_list.append(line) def log(msg): """ From a676f225d2243fbb4ea167ca6161829e67d7cf1d Mon Sep 17 00:00:00 2001 From: aot221 Date: Thu, 7 Aug 2014 11:34:07 -0400 Subject: [PATCH 08/33] Update integrationtestlib.py --- integrationtestlib.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index bef0f10..65e302d 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -24,13 +24,8 @@ import time import socket import sys -import os # the people to notify on failure/if anything goes wrong -with open("notify_list", "rb") as fp: - notify_list = [] - for line in fp: - notify_list.append(line[:-1]) if line[-1] == "\n" else notify_list.append(line) def log(msg): """ @@ -81,11 +76,17 @@ def notify(text, subject): except: pass subject = subject + " @ "+ hostname + " : " + 
sys.argv[0] - + #the people to notify if anyting goes wrong + + with open("email_address_list_file", "r") as emaillist: + notify_list = [] + for emailaddr in notify_list: + notify_list.append(line[:-1]) if line[-1] == "\n" else notify_list.append(line) + for emailaddr in notify_list: log("notifying " + emailaddr) send_gmail.send_gmail(emailaddr, subject, text, "") - + return From 2158e6f7165aa1f7c0d1df6b1df030977dfcb2cf Mon Sep 17 00:00:00 2001 From: aot221 Date: Mon, 11 Aug 2014 03:08:28 -0400 Subject: [PATCH 09/33] Create centralizedputget.py --- centralizedputget.py | 1059 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1059 insertions(+) create mode 100644 centralizedputget.py diff --git a/centralizedputget.py b/centralizedputget.py new file mode 100644 index 0000000..15ca8a8 --- /dev/null +++ b/centralizedputget.py @@ -0,0 +1,1059 @@ +#!/usr/bin/python +""" + + centralizedputget.mix + + + January 8, 2008 + + + justinc@cs.washington.edu + Justin Cappos + + + Attempt to put a (k,v) into our centralized hash table and then get it back. + On error send an email to some folks. + + + Modify the following global var params to have this script functional: + - notify_list, a list of strings with emails denoting who will be + emailed when something goes wrong + + This script takes no arguments. A typical use of this script is to + have it run periodically using something like the following crontab line: + 7 * * * * /usr/bin/python /home/seattle/centralizedputget.py > /home/seattle/cron_log.centralizedputget +""" + +import time +import os +import socket +import sys +import traceback +import threading +import random + +import send_gmail +import integrationtestlib +import nonportable + +from repyportability import * + + +#begin include centralizedadvertise.repy +""" +Author: Justin Cappos + +Start Date: July 8, 2008 + +Description: +Advertisements to a central server (similar to openDHT) + + +""" + +#begin include centralizedadvertise_base.repy +""" +Author: Justin Cappos + +Start Date: July 8, 2008 + +Description: +Advertisements to a central server (similar to openDHT) + + +""" + +#begin include session.repy +# This module wraps communications in a signaling protocol. The purpose is to +# overlay a connection-based protocol with explicit message signaling. +# +# The protocol is to send the size of the message followed by \n and then the +# message itself. The size of a message must be able to be stored in +# sessionmaxdigits. A size of -1 indicates that this side of the connection +# should be considered closed. +# +# Note that the client will block while sending a message, and the receiver +# will block while recieving a message. +# +# While it should be possible to reuse the connectionbased socket for other +# tasks so long as it does not overlap with the time periods when messages are +# being sent, this is inadvisable. + +class SessionEOF(Exception): + pass + +sessionmaxdigits = 20 + +# get the next message off of the socket... +def session_recvmessage(socketobj): + + messagesizestring = '' + # first, read the number of characters... 
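The framing that session_recvmessage() parses is simple enough to show whole: a message travels as its payload length in decimal digits, then '\n', then the payload itself. A self-contained sketch of both directions (plain Python, no sockets):

    def frame(payload):
        # "hello" -> "5\nhello"; an empty message frames as "0\n"
        return str(len(payload)) + "\n" + payload

    def deframe(wire):
        # Inverse of frame(); assumes the whole frame is already in `wire`.
        sizestr, rest = wire.split("\n", 1)
        return rest[:int(sizestr)]

    assert frame("hello") == "5\nhello"
    assert deframe("5\nhello") == "hello"

The real function does the same work as deframe(), but one byte at a time against a socket, which is why it caps the digit count at sessionmaxdigits and treats a size of -1 as end-of-session.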
+ for junkcount in range(sessionmaxdigits): + currentbyte = socketobj.recv(1) + + if currentbyte == '\n': + break + + # not a valid digit + if currentbyte not in '0123456789' and messagesizestring != '' and currentbyte != '-': + raise ValueError, "Bad message size" + + messagesizestring = messagesizestring + currentbyte + + else: + # too large + raise ValueError, "Bad message size" + + try: + messagesize = int(messagesizestring) + except ValueError: + raise ValueError, "Bad message size" + + # nothing to read... + if messagesize == 0: + return '' + + # end of messages + if messagesize == -1: + raise SessionEOF, "Connection Closed" + + if messagesize < 0: + raise ValueError, "Bad message size" + + data = '' + while len(data) < messagesize: + chunk = socketobj.recv(messagesize-len(data)) + if chunk == '': + raise SessionEOF, "Connection Closed" + data = data + chunk + + return data + +# a private helper function +def session_sendhelper(socketobj,data): + sentlength = 0 + # if I'm still missing some, continue to send (I could have used sendall + # instead but this isn't supported in repy currently) + while sentlength < len(data): + thissent = socketobj.send(data[sentlength:]) + sentlength = sentlength + thissent + + + +# send the message +def session_sendmessage(socketobj,data): + header = str(len(data)) + '\n' + # Sending these piecemeal does not accomplish anything, and can contribute + # to timeout issues when run by constantly overloaded machines. + # session_sendhelper(socketobj,header) + + # Concatenate the header and data, rather than sending both separately. + complete_packet = header + data + + # session_sendhelper(socketobj,data) + + session_sendhelper(socketobj, complete_packet) + + +#end include session.repy +# I'll use socket timeout to prevent hanging when it takes a long time... +#begin include sockettimeout.repy +""" + + Justin Cappos, Armon Dadgar + This is a rewrite of the previous version by Richard Jordan + + + 26 Aug 2009 + + + A library that causes sockets to timeout if a recv / send call would + block for more than an allotted amount of time. + +""" + + +class SocketTimeoutError(Exception): + """The socket timed out before receiving a response""" + + +class _timeout_socket(): + """ + + Provides a socket like object which supports custom timeouts + for send() and recv(). + """ + + # Initialize with the socket object and a default timeout + def __init__(self,socket,timeout=10, checkintv='fibonacci'): + """ + + Initializes a timeout socket object. + + + socket: + A socket like object to wrap. Must support send,recv,close, and willblock. + + timeout: + The default timeout for send() and recv(). + + checkintv: + How often socket operations (send,recv) should check if + they can run. The smaller the interval the more time is + spent busy waiting. + """ + # Store the socket, timeout and check interval + self.socket = socket + self.timeout = timeout + self.checkintv = checkintv + + + # Allow changing the default timeout + def settimeout(self,timeout=10): + """ + + Allows changing the default timeout interval. + + + timeout: + The new default timeout interval. Defaults to 10. + Use 0 for no timeout. Given in seconds. 
+ + """ + # Update + self.timeout = timeout + + + # Wrap willblock + def willblock(self): + """ + See socket.willblock() + """ + return self.socket.willblock() + + + # Wrap close + def close(self): + """ + See socket.close() + """ + return self.socket.close() + + + # Provide a recv() implementation + def recv(self,bytes,timeout=None): + """ + + Allows receiving data from the socket object with a custom timeout. + + + bytes: + The maximum amount of bytes to read + + timeout: + (Optional) Defaults to the value given at initialization, or by settimeout. + If provided, the socket operation will timeout after this amount of time (sec). + Use 0 for no timeout. + + + As with socket.recv(), socket.willblock(). Additionally, SocketTimeoutError is + raised if the operation times out. + + + The data received from the socket. + """ + + # It's worth noting that this fibonacci backoff begins with a 2ms poll rate, and + # provides a simple exponential backoff scheme. + + fibonacci_backoff = False + backoff_cap = 100 # Never use more than 100ms poll rate. + + pre_value = 1.0 # Our iterators for Fibonacci sequence. + pre_pre_value = 1.0 # + + # Since we want to be able to initialize with static poll rates (backwards + # compatibility) we specify a string if we're using the fibonacci backoff. + if type(self.checkintv) is str: + if self.checkintv == 'fibonacci': + fibonacci_backoff = True + + # Set the timeout if None + if timeout is None: + timeout = self.timeout + + # Get the start time + starttime = getruntime() + + # Block until we can read + rblock, wblock = self.socket.willblock() + while rblock: + # Check if we should break + if timeout > 0: + # Get the elapsed time + diff = getruntime() - starttime + + # Raise an exception + if diff > timeout: + raise SocketTimeoutError,"recv() timed out!" + + if fibonacci_backoff: + # Iterate the sequence once + sleep_length = pre_value + pre_pre_value + pre_pre_value = pre_value + pre_value = sleep_length + + # Make sure we don't exceed maximum backoff. + if sleep_length > backoff_cap: + sleep_length = backoff_cap + + # Unit conversion to seconds + sleep_length = sleep_length / 1000.0 + + # Sleep + sleep(sleep_length) + else: # Classic functionality. + # Sleep + try: + sleep(float(self.checkintv)) + except: + sleep(0.1) + + # If available, move to the next value of checkintv. + + + # Update rblock + rblock, wblock = self.socket.willblock() + + # Do the recv + return self.socket.recv(bytes) + + + # Provide a send() implementation + def send(self,data,timeout=None): + """ + + Allows sending data with the socket object with a custom timeout. + + + data: + The data to send + + timeout: + (Optional) Defaults to the value given at initialization, or by settimeout. + If provided, the socket operation will timeout after this amount of time (sec). + Use 0 for no timeout. + + + As with socket.send(), socket.willblock(). Additionally, SocketTimeoutError is + raised if the operation times out. + + + The number of bytes sent. + """ + # Set the timeout if None + if timeout is None: + timeout = self.timeout + + # Get the start time + starttime = getruntime() + + # Block until we can write + rblock, wblock = self.socket.willblock() + while wblock: + # Check if we should break + if timeout > 0: + # Get the elapsed time + diff = getruntime() - starttime + + # Raise an exception + if diff > timeout: + raise SocketTimeoutError,"send() timed out!" + + # Sleep + # Since switching to the fibonacci backoff, the nature of + # this field has changed. 
Rather than implement the backoff + # for checking block status (seems wasteful) we'll just use + # a constant value. Ten ms seems appropriate. + sleep(0.010) + + # Update rblock + rblock, wblock = self.socket.willblock() + + # Do the recv + return self.socket.send(data) + + + + +def timeout_openconn(desthost, destport, localip=None, localport=None, timeout=5): + """ + + Wrapper for openconn. Very, very similar + + + Same as Repy openconn + + + Raises the same exceptions as openconn. + + + Creates a socket object for the user + + + socket obj on success + """ + + realsocketlikeobject = openconn(desthost, destport, localip, localport, timeout) + + thissocketlikeobject = _timeout_socket(realsocketlikeobject, timeout) + return thissocketlikeobject + + + + + +def timeout_waitforconn(localip, localport, function, timeout=5): + """ + + Wrapper for waitforconn. Essentially does the same thing... + + + Same as Repy waitforconn with the addition of a timeout argument. + + + Same as Repy waitforconn + + + Sets up event listener which calls function on messages. + + + Handle to listener. + """ + + # We use a closure for the callback we pass to waitforconn so that we don't + # have to map mainch's to callback functions or deal with potential race + # conditions if we did maintain such a mapping. + def _timeout_waitforconn_callback(localip, localport, sockobj, ch, mainch): + # 'timeout' is the free variable 'timeout' that was the argument to + # timeout_waitforconn. + thissocketlikeobject = _timeout_socket(sockobj, timeout) + + # 'function' is the free variable 'function' that was the argument to + # timeout_waitforconn. + return function(localip, localport, thissocketlikeobject, ch, mainch) + + return waitforconn(localip, localport, _timeout_waitforconn_callback) + + + + + +# a wrapper for stopcomm +def timeout_stopcomm(commhandle): + """ + Wrapper for stopcomm. Does the same thing... + """ + + return stopcomm(commhandle) + + + + +#end include sockettimeout.repy +#begin include serialize.repy +""" +Author: Justin Cappos + + +Start date: October 9th, 2009 + +Purpose: A simple library that serializes and deserializes built-in repy types. +This includes strings, integers, floats, booleans, None, complex, tuples, +lists, sets, frozensets, and dictionaries. + +There are no plans for including objects. + +Note: that all items are treated as separate references. This means things +like 'a = []; a.append(a)' will result in an infinite loop. If you have +'b = []; c = (b,b)' then 'c[0] is c[1]' is True. After deserialization +'c[0] is c[1]' is False. + +I can add support or detection of this if desired. +""" + +# The basic idea is simple. Say the type (a character) followed by the +# type specific data. This is adequate for simple types +# that do not contain other types. Types that contain other types, have +# a length indicator and then the underlying items listed sequentially. +# For a dict, this is key1value1key2value2. + + + +def serialize_serializedata(data): + """ + + Convert a data item of any type into a string such that we can + deserialize it later. + + + data: the thing to seriailize. Can be of essentially any type except + objects. + + + TypeError if the type of 'data' isn't allowed + + + None. + + + A string suitable for deserialization. + """ + + # this is essentially one huge case statement... 
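A few worked values make the encoding concrete before the case analysis that follows; each line can be checked against serialize_serializedata():

    serialize_serializedata(None)       # -> 'N'
    serialize_serializedata(True)       # -> 'BT'
    serialize_serializedata(42)         # -> 'I42'
    serialize_serializedata("hi")       # -> 'Shi'
    # Containers length-prefix each encoded element and close with '0:':
    serialize_serializedata([1, "hi"])  # -> 'L2:I13:Shi0:'
    # A dict is 'D' plus a length-prefixed key list, then the value list:
    serialize_serializedata({"a": 1})   # -> 'D7:L2:Sa0:L2:I10:'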
+ + # None + if type(data) == type(None): + return 'N' + + # Boolean + elif type(data) == type(True): + if data == True: + return 'BT' + else: + return 'BF' + + # Integer / Long + elif type(data) is int or type(data) is long: + datastr = str(data) + return 'I'+datastr + + + # Float + elif type(data) is float: + datastr = str(data) + return 'F'+datastr + + + # Complex + elif type(data) is complex: + datastr = str(data) + if datastr[0] == '(' and datastr[-1] == ')': + datastr = datastr[1:-1] + return 'C'+datastr + + + + # String + elif type(data) is str: + return 'S'+data + + + # List or tuple or set or frozenset + elif type(data) is list or type(data) is tuple or type(data) is set or type(data) is frozenset: + # the only impact is the first letter... + if type(data) is list: + mystr = 'L' + elif type(data) is tuple: + mystr = 'T' + elif type(data) is set: + mystr = 's' + elif type(data) is frozenset: + mystr = 'f' + else: + raise Exception("InternalError: not a known type after checking") + + for item in data: + thisitem = serialize_serializedata(item) + # Append the length of the item, plus ':', plus the item. 1 -> '2:I1' + mystr = mystr + str(len(thisitem))+":"+thisitem + + mystr = mystr + '0:' + + return mystr + + + # dict + elif type(data) is dict: + mystr = 'D' + + keysstr = serialize_serializedata(data.keys()) + # Append the length of the list, plus ':', plus the list. + mystr = mystr + str(len(keysstr))+":"+keysstr + + # just plop the values on the end. + valuestr = serialize_serializedata(data.values()) + mystr = mystr + valuestr + + return mystr + + + # Unknown!!! + else: + raise TypeError("Unknown type '"+str(type(data))+"' for data :"+str(data)) + + + +def serialize_deserializedata(datastr): + """ + + Convert a serialized data string back into its original types. + + + datastr: the string to deseriailize. + + + ValueError if the string is corrupted + TypeError if the type of 'data' isn't allowed + + + None. + + + Items of the original type + """ + + if type(datastr) != str: + raise TypeError("Cannot deserialize non-string of type '"+str(type(datastr))+"'") + typeindicator = datastr[0] + restofstring = datastr[1:] + + # this is essentially one huge case statement... + + # None + if typeindicator == 'N': + if restofstring != '': + raise ValueError("Malformed None string '"+restofstring+"'") + return None + + # Boolean + elif typeindicator == 'B': + if restofstring == 'T': + return True + elif restofstring == 'F': + return False + raise ValueError("Malformed Boolean string '"+restofstring+"'") + + # Integer / Long + elif typeindicator == 'I': + try: + return int(restofstring) + except ValueError: + raise ValueError("Malformed Integer string '"+restofstring+"'") + + + # Float + elif typeindicator == 'F': + try: + return float(restofstring) + except ValueError: + raise ValueError("Malformed Float string '"+restofstring+"'") + + # Float + elif typeindicator == 'C': + try: + return complex(restofstring) + except ValueError: + raise ValueError("Malformed Complex string '"+restofstring+"'") + + + + # String + elif typeindicator == 'S': + return restofstring + + # List / Tuple / set / frozenset / dict + elif typeindicator == 'L' or typeindicator == 'T' or typeindicator == 's' or typeindicator == 'f': + # We'll split this and keep adding items to the list. 
At the end, we'll + # convert it to the right type + + thislist = [] + + data = restofstring + # We'll use '0:' as our 'end separator' + while data != '0:': + lengthstr, restofdata = data.split(':', 1) + length = int(lengthstr) + + # get this item, convert to a string, append to the list. + thisitemdata = restofdata[:length] + thisitem = serialize_deserializedata(thisitemdata) + thislist.append(thisitem) + + # Now toss away the part we parsed. + data = restofdata[length:] + + if typeindicator == 'L': + return thislist + elif typeindicator == 'T': + return tuple(thislist) + elif typeindicator == 's': + return set(thislist) + elif typeindicator == 'f': + return frozenset(thislist) + else: + raise Exception("InternalError: not a known type after checking") + + + elif typeindicator == 'D': + + lengthstr, restofdata = restofstring.split(':', 1) + length = int(lengthstr) + + # get this item, convert to a string, append to the list. + keysdata = restofdata[:length] + keys = serialize_deserializedata(keysdata) + + # The rest should be the values list. + values = serialize_deserializedata(restofdata[length:]) + + if type(keys) != list or type(values) != list or len(keys) != len(values): + raise ValueError("Malformed Dict string '"+restofstring+"'") + + thisdict = {} + for position in xrange(len(keys)): + thisdict[keys[position]] = values[position] + + return thisdict + + + + + # Unknown!!! + else: + raise ValueError("Unknown typeindicator '"+str(typeindicator)+"' for data :"+str(restofstring)) + + + + +#end include serialize.repy + + +class CentralAdvertiseError(Exception): + """Error when advertising a value to the central advertise service.""" + +def centralizedadvertisebase_announce(servername, serverport, key, value, ttlval): + """ + + Announce a key / value pair into the CHT. + + + servername: the server ip/name to contact. Must be a string. + + serverport: the server port to contact. Must be an integer. + + key: the key to put the value under. This will be converted to a string. + + value: the value to store at the key. This is also converted to a string. + + ttlval: the amount of time until the value expires. Must be an integer + + + TypeError if ttlval is of the wrong type. + + ValueError if ttlval is not positive + + CentralAdvertiseError is raised the server response is corrupted + + Various network and timeout exceptions are raised by timeout_openconn + and session_sendmessage / session_recvmessage + + + The CHT will store the key / value pair. + + + None + """ + # do basic argument checking / munging + key = str(key) + value = str(value) + + if not type(ttlval) is int and not type(ttlval) is long: + raise TypeError("Invalid type '"+str(type(ttlval))+"' for ttlval.") + + if ttlval < 1: + raise ValueError("The argument ttlval must be positive, not '"+str(ttlval)+"'") + + + # build the tuple to send, then convert to a string because only strings + # (bytes) can be transmitted over the network... + datatosend = ('PUT',key,value,ttlval) + datastringtosend = serialize_serializedata(datatosend) + + + # send the data over a timeout socket using the session library, then + # get a response from the server. 
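Tracing one small announce shows how the serializer and the session layer nest; for key 'k', value 'v', and a 60-second TTL (worked by hand from the two libraries above):

    datatosend = ('PUT', 'k', 'v', 60)
    # serialize_serializedata(datatosend) produces the 22-byte string
    #   'T4:SPUT2:Sk2:Sv3:I600:'
    # session_sendmessage() then frames it on the wire as
    #   '22\nT4:SPUT2:Sk2:Sv3:I600:'
    # and a successful server replies with the framed serialization of 'OK':
    #   '3\nSOK'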
+ sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, datastringtosend) + rawresponse = session_recvmessage(sockobj) + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + # We should check that the response is 'OK' + try: + response = serialize_deserializedata(rawresponse) + if response != 'OK': + raise CentralAdvertiseError("Centralized announce failed with '"+response+"'") + except ValueError, e: + raise CentralAdvertiseError("Received unknown response from server '"+rawresponse+"'") + + + + +def centralizedadvertisebase_lookup(servername, serverport, key, maxvals=100): + """ + + Returns a list of valid values stored under a key + + + servername: the server ip/name to contact. Must be a string. + + serverport: the server port to contact. Must be an integer. + + key: the key to put the value under. This will be converted to a string. + + maxvals: the maximum number of values to return. Must be an integer + + + TypeError if maxvals is of the wrong type. + + ValueError if maxvals is not a positive number + + CentralAdvertiseError is raised the server response is corrupted + + Various network and timeout exceptions are raised by timeout_openconn + and session_sendmessage / session_recvmessage + + + None + + + The list of values + """ + + # do basic argument checking / munging + key = str(key) + + if not type(maxvals) is int and not type(maxvals) is long: + raise TypeError("Invalid type '"+str(type(maxvals))+"' for ttlval.") + + if maxvals < 1: + raise ValueError("The argument ttlval must be positive, not '"+str(ttlval)+"'") + + # build the tuple to send, then convert to a string because only strings + # (bytes) can be transmitted over the network... + messagetosend = ('GET',key,maxvals) + messagestringtosend = serialize_serializedata(messagetosend) + + + sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, messagestringtosend) + rawreceiveddata = session_recvmessage(sockobj) + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + + try: + responsetuple = serialize_deserializedata(rawreceiveddata) + except ValueError, e: + raise CentralAdvertiseError("Received unknown response from server '"+rawresponse+"'") + + # For a set of values, 'a','b','c', I should see the response: + # ('OK', ['a','b','c']) Anything else is WRONG!!! + + if not type(responsetuple) is tuple: + raise CentralAdvertiseError("Received data is not a tuple '"+rawresponse+"'") + + if len(responsetuple) != 2: + raise CentralAdvertiseError("Response tuple did not have exactly two elements '"+rawresponse+"'") + if responsetuple[0] != 'OK': + raise CentralAdvertiseError("Central server returns error '"+str(responsetuple)+"'") + + + if not type(responsetuple[1]) is list: + raise CentralAdvertiseError("Received item is not a list '"+rawresponse+"'") + + for responseitem in responsetuple[1]: + if not type(responseitem) is str: + raise CentralAdvertiseError("Received item '"+str(responseitem)+"' is not a string in '"+rawresponse+"'") + + # okay, we *finally* seem to have what we expect... + + return responsetuple[1] + +#end include centralizedadvertise_base.repy + +# Hmm, perhaps I should make an initialization call instead of hardcoding this? 
+# I suppose it doesn't matter since one can always override these values +servername = "advertiseserver.poly.edu" +# This port is updated to use the new port (legacy port is 10101) +serverport = 10102 + + +def centralizedadvertise_announce(key, value, ttlval): + """ + + Announce a key / value pair into the CHT. + + + key: the key to put the value under. This will be converted to a string. + + value: the value to store at the key. This is also converted to a string. + + ttlval: the amount of time until the value expires. Must be an integer + + + TypeError if ttlval is of the wrong type. + + ValueError if ttlval is not positive + + CentralAdvertiseError is raised the server response is corrupted + + Various network and timeout exceptions are raised by timeout_openconn + and session_sendmessage / session_recvmessage + + + The CHT will store the key / value pair. + + + None + """ + return centralizedadvertisebase_announce(servername, serverport, key, value, ttlval) + + +def centralizedadvertise_lookup(key, maxvals=100): + """ + + Returns a list of valid values stored under a key + + + key: the key to put the value under. This will be converted to a string. + + maxvals: the maximum number of values to return. Must be an integer + + + TypeError if maxvals is of the wrong type. + + ValueError if maxvals is not a positive number + + CentralAdvertiseError is raised the server response is corrupted + + Various network and timeout exceptions are raised by timeout_openconn + and session_sendmessage / session_recvmessage + + + None + + + The list of values + """ + return centralizedadvertisebase_lookup(servername, serverport, key, maxvals) + +#end include centralizedadvertise.repy + +# event for communicating when the lookup is done or timedout +lookup_done_event = threading.Event() + + + +def lookup_timedout(): + """ + + Waits for lookup_done_event and notifies the folks on the + notify_list (global var) of the lookup timeout. + + + None. + + + None. + + + Sends an email to the notify_list folks + + + None. + """ + integrationtestlib.log("in lookup_timedout()") + notify_msg = "Centralized lookup failed -- lookup_timedout() fired after 30 sec." + + # wait for the event to be set, timeout after 30 minutes + wait_time = 1800 + tstamp_before_wait = nonportable.getruntime() + lookup_done_event.wait(wait_time) + tstamp_after_wait = nonportable.getruntime() + + t_waited = tstamp_after_wait - tstamp_before_wait + if abs(wait_time - t_waited) < 5: + notify_msg += " And lookup stalled for over 30 minutes (max timeout value)." + else: + notify_msg += " And lookup stalled for " + str(t_waited) + " seconds" + + integrationtestlib.notify(notify_msg) + return + +def main(): + """ + + Program's main. + + + None. + + + All exceptions are caught. + + + None. + + + None. 
+ """ + # setup the gmail user/password to use when sending email + success,explanation_str = send_gmail.init_gmail() + if not success: + integrationtestlib.log(explanation_str) + sys.exit(0) + + key = random.randint(4,2**30) + value = random.randint(4,2**30) + ttlval = 60 + + # put(key,value) with ttlval into the Centralized HT + integrationtestlib.log("calling centralizedadvertise_announce(key: " + str(key) + ", val: " + str(value) + ", ttl: " + str(ttlval) + ")") + try: + centralizedadvertise_announce(key,value,ttlval) + except: + integrationtestlib.handle_exception("centralizedadvertise_announce() failed") + sys.exit(0) + + # a 30 second timer to email the notify_list on slow lookups + lookup_timedout_timer = threading.Timer(30, lookup_timedout) + # start the lookup timer + lookup_timedout_timer.start() + + # get(key) from the centralized HT + integrationtestlib.log("calling centralizedadvertise_lookup(key: " + str(key) + ")") + try: + ret_value = centralizedadvertise_lookup(key) + # TODO: check the return value as well + # ret_value = int(ret_value[0]) + except: + integrationtestlib.handle_exception("centralizedadvertise_lookup() failed") + sys.exit(0) + + lookup_timedout_timer.cancel() + lookup_done_event.set() + return + +if __name__ == "__main__": + main() + + From fd44ef31d8e90a1b24654d9bab4da9e93c8911f3 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 12 Aug 2014 03:41:56 -0400 Subject: [PATCH 10/33] Updated to make sure that the ret_value is correct checks that the ret_value contains the value for the corresponding key. --- centralizedputget.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/centralizedputget.py b/centralizedputget.py index 15ca8a8..21f50a7 100644 --- a/centralizedputget.py +++ b/centralizedputget.py @@ -1043,8 +1043,12 @@ def main(): integrationtestlib.log("calling centralizedadvertise_lookup(key: " + str(key) + ")") try: ret_value = centralizedadvertise_lookup(key) + print ret_value # TODO: check the return value as well - # ret_value = int(ret_value[0]) + ret_value = int(ret_value[0]) + if (ret_value != value): + integrationtestlib.handle_exception("ret_value is incorrect") + print ("ret_value is incorrect") except: integrationtestlib.handle_exception("centralizedadvertise_lookup() failed") sys.exit(0) From ce3b110a5a339bc128440fc0fcdbfb01a4ec2c58 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 12 Aug 2014 13:01:52 -0400 Subject: [PATCH 11/33] Create centralizedadvertise.repy --- centralizedadvertise.repy | 51 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 centralizedadvertise.repy diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy new file mode 100644 index 0000000..7e361c0 --- /dev/null +++ b/centralizedadvertise.repy @@ -0,0 +1,51 @@ +""" +Author: Justin Cappos + +Start Date: July 8, 2008 + +Description: +Advertisements to a central server (similar to openDHT) + + +""" + +include session.repy +# I'll use socket timeout to prevent hanging when it takes a long time... 
+include sockettimeout.repy +servername = "satya.cs.washington.edu" +serverport = 10101 + +def centralizedadvertise_announce(key, value, ttlval): + + sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, "PUT|"+str(key)+"|"+str(value)+"|"+str(ttlval)) + response = session_recvmessage(sockobj) + if response != 'OK': + raise Exception, "Centralized announce failed '"+response+"'" + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + return True + + + + +def centralizedadvertise_lookup(key, maxvals=100): + sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, "GET|"+str(key)+"|"+str(maxvals)) + recvdata = session_recvmessage(sockobj) + # worked + if recvdata.endswith('OK'): + return recvdata[:-len('OK')].split(',') + raise Exception, "Centralized lookup failed" + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + + From 500e3466098a602b6d7aed825cde6075ff35bc44 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 12 Aug 2014 13:02:22 -0400 Subject: [PATCH 12/33] Create dorputget_new.py --- dorputget_new.py | 155 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) create mode 100644 dorputget_new.py diff --git a/dorputget_new.py b/dorputget_new.py new file mode 100644 index 0000000..c110155 --- /dev/null +++ b/dorputget_new.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +""" + + dorputget.py + + + December 17, 2008 + + + ivan@cs.washington.edu + Ivan Beschastnikh + + + Attempt to put a (k,v) into DO registry and then get it back. On error + send an email to some folks. + + + Modify the following global var params to have this script functional: + - notify_list, a list of strings with emails denoting who will be + emailed when something goes wrong + + - GMAIL_USER and GMAIL_PWD environment variables: the username and + password of the gmail user who will be sending the email to the + emails in the notify_list (see crontab line below). + + This script takes no arguments. A typical use of this script is to + have it run periodically using something like the following crontab line: + 7 * * * * export GMAIL_USER='..' && export GMAIL_PWD='..' && /usr/bin/python /home/seattle/dorputget.py > /home/seattle/cron_log.dorputget +""" + +import time +import os +import socket +import sys +import traceback +import threading +import random + +import send_gmail +import integrationtestlib +import repyhelper + +repyhelper.translate_and_import("/home/integrationtester/cron_tests/dorputget/DORadvertise.repy") + +# Armon: This is to replace using the time command with getruntime +import nonportable + +# event for communicating when the lookup is done or timedout +lookup_done_event = threading.Event() + + + +def lookup_timedout(): + """ + + Waits for lookup_done_event and notifies the folks on the + notify_list (global var) of the lookup timeout. + + + None. + + + None. + + + Sends an email to the notify_list folks + + + None. + """ + integrationtestlib.log("in lookup_timedout()") + notify_msg = "DOR lookup failed -- lookup_timedout() fired after 60 seconds." 
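lookup_timedout() is the timer half of the watchdog pattern every monitor in this series uses: arm a threading.Timer before the slow call, and let its callback block on an Event that the main path sets on success. Condensed to its skeleton (a sketch, not the module's exact code):

    import threading
    import time

    lookup_done = threading.Event()

    def watchdog():
        # Runs 60 seconds after start(); complains only if the lookup
        # still has not signalled completion within 30 more minutes.
        start = time.time()
        lookup_done.wait(1800)
        if time.time() - start >= 1795:
            print "lookup stalled; notifying"  # integrationtestlib.notify() in the real tests

    timer = threading.Timer(60, watchdog)
    timer.start()
    # ... perform the announce and lookup here ...
    timer.cancel()     # fast path: the watchdog thread never runs
    lookup_done.set()  # if it already fired, release its wait() immediately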
+ subject = "DOR with repy test failed" + + # wait for the event to be set, timeout after 30 minutes + wait_time = 1800 + tstamp_before_wait = nonportable.getruntime() + lookup_done_event.wait(wait_time) + tstamp_after_wait = nonportable.getruntime() + + t_waited = tstamp_after_wait - tstamp_before_wait + if abs(wait_time - t_waited) < 5: + notify_msg += " And lookup stalled for over 30 minutes (max timeout value)." + else: + notify_msg += " And lookup stalled for " + str(t_waited) + " seconds" + + integrationtestlib.notify(notify_msg,subject ) + return + +def main(): + """ + + Program's main. + + + None. + + + All exceptions are caught. + + + None. + + + None. + """ + # setup the gmail user/password to use when sending email + success,explanation_str = send_gmail.init_gmail() + if not success: + integrationtestlib.log(explanation_str) + sys.exit(0) + + integrationtestlib.notify_list.append("cemeyer@u.washington.edu") + + key = str(random.randint(4,2**30)) + value = str(random.randint(4,2**30)) + ttlval = 60 + subject = "DOR with repy test failed" + + + # put(key,value) with ttlval into DOR + integrationtestlib.log("calling DORadvertise_announce(key: " + str(key) + ", val: " + str(value) + ", ttl: " + str(ttlval) + ")") + try: + DORadvertise_announce(key, value, ttlval) + except: + message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_announce(). " + message = message + "Anouncing with key: " + key + ", value: " + value + ", ttlval: " + str(ttlval) + integrationtestlib.handle_exception("DORadvertise_announce() failed", subject) + print message + sys.exit(0) + + # a 60 second timer to email the notify_list on slow lookups + lookup_timedout_timer = threading.Timer(60, lookup_timedout) + # start the lookup timer + lookup_timedout_timer.start() + + # get(key) from DOR + integrationtestlib.log("calling DORadvertise_lookup(key: " + str(key) + ")") + try: + ret_value = DORadvertise_lookup(key) + # TODO: check the return value as well + # ret_value = int(ret_value[0]) + except: + message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_lookup(). 
" + message = message + "Looking up with key: " + key + integrationtestlib.handle_exception(message, subject) + sys.exit(0) + + lookup_timedout_timer.cancel() + lookup_done_event.set() + return + +if __name__ == "__main__": + main() + From 87a32870ee46471bdeaf187b7d36e94dfecb23e7 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 26 Aug 2014 00:36:51 -0400 Subject: [PATCH 13/33] Update integrationtestlib.py --- integrationtestlib.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 65e302d..bc7058b 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -26,6 +26,10 @@ import sys # the people to notify on failure/if anything goes wrong +notify_list = [ + "aot221@nyu.edu", + "at_hato_20@yahoo.com", +] def log(msg): """ @@ -76,17 +80,19 @@ def notify(text, subject): except: pass subject = subject + " @ "+ hostname + " : " + sys.argv[0] - #the people to notify if anyting goes wrong - - with open("email_address_list_file", "r") as emaillist: - notify_list = [] - for emailaddr in notify_list: - notify_list.append(line[:-1]) if line[-1] == "\n" else notify_list.append(line) - + notify_list = [] + emailFile = open("email_address_list_file", "r") + #print emailFile.read() + notify_list = emailFile.readlines() + emailItr = 0 + for email in notify_list: + notify_list[emailItr] = email.rstrip("\r\n") + emailItr+=1 + print notify_list for emailaddr in notify_list: log("notifying " + emailaddr) send_gmail.send_gmail(emailaddr, subject, text, "") - + return From 784c42c238f694915184948c4f7aa7d8914af5f9 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 26 Aug 2014 01:21:45 -0400 Subject: [PATCH 14/33] Update integrationtestlib.py --- integrationtestlib.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index bc7058b..306a6b2 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -80,15 +80,16 @@ def notify(text, subject): except: pass subject = subject + " @ "+ hostname + " : " + sys.argv[0] + + #This will loop through a file containing emails that need to be notified and create a list out of them notify_list = [] emailFile = open("email_address_list_file", "r") - #print emailFile.read() notify_list = emailFile.readlines() emailItr = 0 for email in notify_list: notify_list[emailItr] = email.rstrip("\r\n") emailItr+=1 - print notify_list + for emailaddr in notify_list: log("notifying " + emailaddr) send_gmail.send_gmail(emailaddr, subject, text, "") From 6f391a5f202e9e15bf94ee0093108057b71f7b5a Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 26 Aug 2014 13:20:40 -0400 Subject: [PATCH 15/33] Update integrationtestlib.py --- integrationtestlib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 306a6b2..d5099b2 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -85,10 +85,10 @@ def notify(text, subject): notify_list = [] emailFile = open("email_address_list_file", "r") notify_list = emailFile.readlines() - emailItr = 0 + notify_list_itr = 0 for email in notify_list: notify_list[emailItr] = email.rstrip("\r\n") - emailItr+=1 + notify_list_itr+=1 for emailaddr in notify_list: log("notifying " + emailaddr) From a9460be34f5b773b97b938dc69bede32529d5985 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 26 Aug 2014 13:20:57 -0400 Subject: [PATCH 16/33] Update integrationtestlib.py --- integrationtestlib.py | 6 ------ 1 file changed, 6 deletions(-) 
diff --git a/integrationtestlib.py b/integrationtestlib.py index d5099b2..0f27c50 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -25,12 +25,6 @@ import socket import sys -# the people to notify on failure/if anything goes wrong -notify_list = [ - "aot221@nyu.edu", - "at_hato_20@yahoo.com", -] - def log(msg): """ From 68d09143e285c4fb469ecfdf09bd5ae38e3f5949 Mon Sep 17 00:00:00 2001 From: aot221 Date: Tue, 26 Aug 2014 13:22:22 -0400 Subject: [PATCH 17/33] Update integrationtestlib.py --- integrationtestlib.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 0f27c50..21222d1 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -25,6 +25,7 @@ import socket import sys + def log(msg): """ @@ -80,8 +81,8 @@ def notify(text, subject): emailFile = open("email_address_list_file", "r") notify_list = emailFile.readlines() notify_list_itr = 0 - for email in notify_list: - notify_list[emailItr] = email.rstrip("\r\n") + for emailaddr in notify_list: + notify_list[notify_list_itr] = emailaddr.rstrip("\r\n") notify_list_itr+=1 for emailaddr in notify_list: From a7bd41c888981fd593af9be7fb4b82b2fd7e1608 Mon Sep 17 00:00:00 2001 From: aot221 Date: Wed, 27 Aug 2014 15:46:14 -0400 Subject: [PATCH 18/33] Now utilizes an "email_address_list_file" to read in emails. --- integrationtestlib.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/integrationtestlib.py b/integrationtestlib.py index 21222d1..c19cfb0 100644 --- a/integrationtestlib.py +++ b/integrationtestlib.py @@ -78,17 +78,14 @@ def notify(text, subject): #This will loop through a file containing emails that need to be notified and create a list out of them notify_list = [] - emailFile = open("email_address_list_file", "r") - notify_list = emailFile.readlines() - notify_list_itr = 0 - for emailaddr in notify_list: - notify_list[notify_list_itr] = emailaddr.rstrip("\r\n") - notify_list_itr+=1 - - for emailaddr in notify_list: - log("notifying " + emailaddr) - send_gmail.send_gmail(emailaddr, subject, text, "") - + email_file = open("email_address_list_file", "r") + email_list = email_file.readlines() + email_file.close() + for email_address in email_list: + email_address = email_address.rstrip("\r\n") + notify_list.append(email_address) + log("notifying " + email_address) + send_gmail.send_gmail(email_address, subject, text, "") return From a26d4a9f01a60e438930e6d750aa8b01f269d669 Mon Sep 17 00:00:00 2001 From: asm582 Date: Sun, 28 Sep 2014 10:57:15 -0400 Subject: [PATCH 19/33] Removed as we do not use DOR code in production and we do not need monitoring for the same --- centralizedadvertise.repy | 1 + dorputget_new.py | 155 -------------------------------------- send_gmail.py | 2 +- 3 files changed, 2 insertions(+), 156 deletions(-) delete mode 100644 dorputget_new.py diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy index 7e361c0..0881b1e 100644 --- a/centralizedadvertise.repy +++ b/centralizedadvertise.repy @@ -42,6 +42,7 @@ def centralizedadvertise_lookup(key, maxvals=100): if recvdata.endswith('OK'): return recvdata[:-len('OK')].split(',') raise Exception, "Centralized lookup failed" + finally: # BUG: This raises an error right now if the call times out ( #260 ) # This isn't a big problem, but it is the "wrong" exception diff --git a/dorputget_new.py b/dorputget_new.py deleted file mode 100644 index c110155..0000000 --- a/dorputget_new.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/python 
-""" - - dorputget.py - - - December 17, 2008 - - - ivan@cs.washington.edu - Ivan Beschastnikh - - - Attempt to put a (k,v) into DO registry and then get it back. On error - send an email to some folks. - - - Modify the following global var params to have this script functional: - - notify_list, a list of strings with emails denoting who will be - emailed when something goes wrong - - - GMAIL_USER and GMAIL_PWD environment variables: the username and - password of the gmail user who will be sending the email to the - emails in the notify_list (see crontab line below). - - This script takes no arguments. A typical use of this script is to - have it run periodically using something like the following crontab line: - 7 * * * * export GMAIL_USER='..' && export GMAIL_PWD='..' && /usr/bin/python /home/seattle/dorputget.py > /home/seattle/cron_log.dorputget -""" - -import time -import os -import socket -import sys -import traceback -import threading -import random - -import send_gmail -import integrationtestlib -import repyhelper - -repyhelper.translate_and_import("/home/integrationtester/cron_tests/dorputget/DORadvertise.repy") - -# Armon: This is to replace using the time command with getruntime -import nonportable - -# event for communicating when the lookup is done or timedout -lookup_done_event = threading.Event() - - - -def lookup_timedout(): - """ - - Waits for lookup_done_event and notifies the folks on the - notify_list (global var) of the lookup timeout. - - - None. - - - None. - - - Sends an email to the notify_list folks - - - None. - """ - integrationtestlib.log("in lookup_timedout()") - notify_msg = "DOR lookup failed -- lookup_timedout() fired after 60 seconds." - subject = "DOR with repy test failed" - - # wait for the event to be set, timeout after 30 minutes - wait_time = 1800 - tstamp_before_wait = nonportable.getruntime() - lookup_done_event.wait(wait_time) - tstamp_after_wait = nonportable.getruntime() - - t_waited = tstamp_after_wait - tstamp_before_wait - if abs(wait_time - t_waited) < 5: - notify_msg += " And lookup stalled for over 30 minutes (max timeout value)." - else: - notify_msg += " And lookup stalled for " + str(t_waited) + " seconds" - - integrationtestlib.notify(notify_msg,subject ) - return - -def main(): - """ - - Program's main. - - - None. - - - All exceptions are caught. - - - None. - - - None. - """ - # setup the gmail user/password to use when sending email - success,explanation_str = send_gmail.init_gmail() - if not success: - integrationtestlib.log(explanation_str) - sys.exit(0) - - integrationtestlib.notify_list.append("cemeyer@u.washington.edu") - - key = str(random.randint(4,2**30)) - value = str(random.randint(4,2**30)) - ttlval = 60 - subject = "DOR with repy test failed" - - - # put(key,value) with ttlval into DOR - integrationtestlib.log("calling DORadvertise_announce(key: " + str(key) + ", val: " + str(value) + ", ttl: " + str(ttlval) + ")") - try: - DORadvertise_announce(key, value, ttlval) - except: - message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_announce(). 
" - message = message + "Anouncing with key: " + key + ", value: " + value + ", ttlval: " + str(ttlval) - integrationtestlib.handle_exception("DORadvertise_announce() failed", subject) - print message - sys.exit(0) - - # a 60 second timer to email the notify_list on slow lookups - lookup_timedout_timer = threading.Timer(60, lookup_timedout) - # start the lookup timer - lookup_timedout_timer.start() - - # get(key) from DOR - integrationtestlib.log("calling DORadvertise_lookup(key: " + str(key) + ")") - try: - ret_value = DORadvertise_lookup(key) - # TODO: check the return value as well - # ret_value = int(ret_value[0]) - except: - message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_lookup(). " - message = message + "Looking up with key: " + key - integrationtestlib.handle_exception(message, subject) - sys.exit(0) - - lookup_timedout_timer.cancel() - lookup_done_event.set() - return - -if __name__ == "__main__": - main() - diff --git a/send_gmail.py b/send_gmail.py index 60b3eb8..60ed6e7 100644 --- a/send_gmail.py +++ b/send_gmail.py @@ -40,7 +40,7 @@ GMAIL_USER="" GMAIL_PWD="" -gmail_file_name = "/home/monzum/monitor_script/seattle_gmail_info" +gmail_file_name = "/home/abhishek/changes_monitor_script/monitor_script/seattle_gmail_info" def init_gmail(gmail_user="", gmail_pwd="", gmail_user_shvarname="GMAIL_USER", gmail_pwd_shvarname="GMAIL_PWD"): """ From ce451874058e5cb14812ee052ab381158326c483 Mon Sep 17 00:00:00 2001 From: asm582 Date: Sun, 28 Sep 2014 14:08:31 -0400 Subject: [PATCH 20/33] Add exception for socket timeout bug --- DORadvertise.repy | 216 ++++++++ DORadvertise_repy.py | 234 ++++++++ DORadvertise_repy.pyc | Bin 0 -> 6028 bytes centralizedadvertise.repy | 5 +- centralizedadvertise.repy~ | 55 ++ dorputget_new.py | 155 ++++++ harshexit.py | 174 ++++++ harshexit.pyc | Bin 0 -> 2776 bytes integrationtestlib.pyc | Bin 0 -> 3181 bytes irc_seattlebot.pyc | Bin 0 -> 2519 bytes nonportable.py | 1028 ++++++++++++++++++++++++++++++++++++ nonportable.pyc | Bin 0 -> 18576 bytes portability | 1 + repy_constants.py | 48 ++ repy_constants.pyc | Bin 0 -> 745 bytes repy_v1 | 1 + repyhelper.py | 495 +++++++++++++++++ repyhelper.pyc | Bin 0 -> 14404 bytes repyportability.py | 313 +++++++++++ repyportability.pyc | Bin 0 -> 8156 bytes safe.py | 698 ++++++++++++++++++++++++ safe.pyc | Bin 0 -> 24838 bytes seattle_gmail_info | 1 + seattle_gmail_info~ | 1 + send_gmail.pyc | Bin 0 -> 5653 bytes send_gmail.py~ | 187 +++++++ statusstorage.py | 111 ++++ statusstorage.pyc | Bin 0 -> 2406 bytes tracebackrepy.py | 230 ++++++++ tracebackrepy.pyc | Bin 0 -> 5591 bytes 30 files changed, 3952 insertions(+), 1 deletion(-) create mode 100644 DORadvertise.repy create mode 100644 DORadvertise_repy.py create mode 100644 DORadvertise_repy.pyc create mode 100644 centralizedadvertise.repy~ create mode 100644 dorputget_new.py create mode 100644 harshexit.py create mode 100644 harshexit.pyc create mode 100644 integrationtestlib.pyc create mode 100644 irc_seattlebot.pyc create mode 100755 nonportable.py create mode 100644 nonportable.pyc create mode 160000 portability create mode 100644 repy_constants.py create mode 100644 repy_constants.pyc create mode 160000 repy_v1 create mode 100644 repyhelper.py create mode 100644 repyhelper.pyc create mode 100644 repyportability.py create mode 100644 repyportability.pyc create mode 100644 safe.py create mode 100644 safe.pyc create mode 100644 seattle_gmail_info create mode 100644 seattle_gmail_info~ create mode 100644 send_gmail.pyc create mode 100644 
send_gmail.py~ create mode 100755 statusstorage.py create mode 100644 statusstorage.pyc create mode 100755 tracebackrepy.py create mode 100644 tracebackrepy.pyc diff --git a/DORadvertise.repy b/DORadvertise.repy new file mode 100644 index 0000000..a7c5944 --- /dev/null +++ b/DORadvertise.repy @@ -0,0 +1,216 @@ +""" +Author: Conrad Meyer + +Start Date: Wed Dec 9 2009 + +Description: +Advertisements to the Digital Object Registry run by CNRI. + +""" + + + + +include sockettimeout.repy +include httpretrieve.repy +include xmlparse.repy + + + + +DORadvertise_FORM_LOCATION = "http://geni.doregistry.org/SeattleGENI/HashTable" + + + + +class DORadvertise_XMLError(Exception): + """ + Exception raised when the XML recieved from the Digital Object Registry + server does not match the structure we expect. + """ + pass + + + + +class DORadvertise_BadRequest(Exception): + """ + Exception raised when the Digital Object Registry interface indigates we + have made an invalid request. + """ + + + def __init__(self, errno, errstring): + self.errno = errno + self.errstring = errstring + Exception.__init__(self, "Bad DOR request (%s): '%s'" % (str(errno), errstring)) + + + + +def DORadvertise_announce(key, value, ttlval, timeout=None): + """ + + Announce a (key, value) pair to the Digital Object Registry. + + + key: + The new key the value should be stored under. + + value: + The value to associate with the given key. + + ttlval: + The length of time (in seconds) to persist this key <-> value + association in DHT. + + timeout: + The number of seconds to spend on this operation before failing + early. + + + xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. + DORadvertise_XMLError if the xml response structure does not correspond + to what we expect. + DORadvertise_BadRequest if the response indicates an error. + Any exception httpretrieve_get_string() throws (including timeout errors). + + + The key <-> value association gets stored in openDHT for a while. + + + None. + """ + + post_params = {'command': 'announce', 'key': key, 'value': value, + 'lifetime': str(int(ttlval))} + + _DORadvertise_command(post_params, timeout=timeout) + + return None + + + + + +def DORadvertise_lookup(key, maxvals=100, timeout=None): + """ + + Retrieve a stored value from the Digital Object Registry. + + + key: + The key the value is stored under. + + maxvals: + The maximum number of values stored under this key to + return to the caller. + + timeout: + The number of seconds to spend on this operation before failing + early. If not specified, the default is set to the default + timeout value for the http library (30 seconds). + + + xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. + DORadvertise_XMLError if the xml response structure does not correspond + to what we expect. + DORadvertise_BadRequest if the response indicates an error. + Any exception httpretrieve_get_string() throws (including timeout errors). + + + None. + + + The value stored in the Digital Object Registry at key. + """ + + post_params = {'command': 'lookup', 'key': key, 'maxvals': str(maxvals)} + + return _DORadvertise_command(post_params, timeout=timeout) + + + +def _DORadvertise_command(parameters, timeout=None): + # Internal helper function; calls the remote command, and returns + # the results we can glean from it. + + # If there is a timeout, use it! 
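+  # Note that httpretrieve_get_string() applies its own default timeout when
+  # none is given, so the timeout keyword is only passed through when the
+  # caller supplied one.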
+ if timeout != None: + post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ + postdata=parameters, timeout=timeout, \ + httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) + else: + post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ + postdata=parameters, \ + httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) + + + # Parse the result to check for success. Throw several exceptions to + # ensure the XML we're reading makes sense. + xmltree = xmlparse_parse(post_result) + + if xmltree.tag_name != "HashTableService": + raise DORadvertise_XMLError( + "Root node error. Expected: 'HashTableService', " + + "got: '%s'" % xmltree.tag_name) + + if xmltree.children is None: + raise DORadvertise_XMLError("Root node contains no children nodes.") + + # We expect to get an error code, an error string, and possibly some + # values from the server. + error_msg = None + error = None + values = None + + numxmlchildren = len(xmltree.children) + if numxmlchildren not in [2, 3]: + raise DORadvertise_XMLError("Root XML node contains inappropriate " + \ + "number of child nodes.") + + for xmlchild in xmltree.children: + # Read the numeric error code. + if xmlchild.tag_name == "status" and xmlchild.content is not None: + if error is not None: + raise DORadvertise_XMLError("XML contains multiple status tags") + error = int(xmlchild.content.strip()) + + # String error message (description:status as strerror:errno). + elif xmlchild.tag_name == "description": + if error_msg is not None: + raise DORadvertise_XMLError("XML contains multiple description tags") + error_msg = xmlchild.content + + # We found a tag. Let's try and get some values. + elif xmlchild.tag_name == "values" and xmlchild.children is not None: + if values is not None: + raise DORadvertise_XMLError("XML contains multiple values tags") + + values = [] + for valuenode in xmlchild.children: + if valuenode.tag_name != "value": + raise DORadvertise_XMLError( + "Child tag of ; expected: '', got: '<%s>'" % \ + valuenode.tag_name) + + content = valuenode.content + if content is None: + content = "" + + values.append(content) + + # Check for tags we do not expect. + elif xmlchild.tag_name not in ("status", "description", "values"): + raise DORadvertise_XMLError("Unexpected tag '" + \ + str(xmlchild.tag_name) + "' while parsing response.") + + if error is not 0: + raise DORadvertise_BadRequest(error, error_msg) + + # This happens when the server returns + if values is None: + return [] + + return values + diff --git a/DORadvertise_repy.py b/DORadvertise_repy.py new file mode 100644 index 0000000..3c21e17 --- /dev/null +++ b/DORadvertise_repy.py @@ -0,0 +1,234 @@ +### Automatically generated by repyhelper.py ### /home/integrationtester/cron_tests/dorputget/DORadvertise.repy + +### THIS FILE WILL BE OVERWRITTEN! +### DO NOT MAKE CHANGES HERE, INSTEAD EDIT THE ORIGINAL SOURCE FILE +### +### If changes to the src aren't propagating here, try manually deleting this file. +### Deleting this file forces regeneration of a repy translation + + +from repyportability import * +from repyportability import _context +import repyhelper +mycontext = repyhelper.get_shared_context() +callfunc = 'import' +callargs = [] + +""" +Author: Conrad Meyer + +Start Date: Wed Dec 9 2009 + +Description: +Advertisements to the Digital Object Registry run by CNRI. 
+ +""" + + + + +repyhelper.translate_and_import('sockettimeout.repy') +repyhelper.translate_and_import('httpretrieve.repy') +repyhelper.translate_and_import('xmlparse.repy') + + + + +DORadvertise_FORM_LOCATION = "http://geni.doregistry.org/SeattleGENI/HashTable" + + + + +class DORadvertise_XMLError(Exception): + """ + Exception raised when the XML recieved from the Digital Object Registry + server does not match the structure we expect. + """ + pass + + + + +class DORadvertise_BadRequest(Exception): + """ + Exception raised when the Digital Object Registry interface indigates we + have made an invalid request. + """ + + + def __init__(self, errno, errstring): + self.errno = errno + self.errstring = errstring + Exception.__init__(self, "Bad DOR request (%s): '%s'" % (str(errno), errstring)) + + + + +def DORadvertise_announce(key, value, ttlval, timeout=None): + """ + + Announce a (key, value) pair to the Digital Object Registry. + + + key: + The new key the value should be stored under. + + value: + The value to associate with the given key. + + ttlval: + The length of time (in seconds) to persist this key <-> value + association in DHT. + + timeout: + The number of seconds to spend on this operation before failing + early. + + + xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. + DORadvertise_XMLError if the xml response structure does not correspond + to what we expect. + DORadvertise_BadRequest if the response indicates an error. + Any exception httpretrieve_get_string() throws (including timeout errors). + + + The key <-> value association gets stored in openDHT for a while. + + + None. + """ + + post_params = {'command': 'announce', 'key': key, 'value': value, + 'lifetime': str(int(ttlval))} + + _DORadvertise_command(post_params, timeout=timeout) + + return None + + + + + +def DORadvertise_lookup(key, maxvals=100, timeout=None): + """ + + Retrieve a stored value from the Digital Object Registry. + + + key: + The key the value is stored under. + + maxvals: + The maximum number of values stored under this key to + return to the caller. + + timeout: + The number of seconds to spend on this operation before failing + early. If not specified, the default is set to the default + timeout value for the http library (30 seconds). + + + xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. + DORadvertise_XMLError if the xml response structure does not correspond + to what we expect. + DORadvertise_BadRequest if the response indicates an error. + Any exception httpretrieve_get_string() throws (including timeout errors). + + + None. + + + The value stored in the Digital Object Registry at key. + """ + + post_params = {'command': 'lookup', 'key': key, 'maxvals': str(maxvals)} + + return _DORadvertise_command(post_params, timeout=timeout) + + + +def _DORadvertise_command(parameters, timeout=None): + # Internal helper function; calls the remote command, and returns + # the results we can glean from it. + + # If there is a timeout, use it! + if timeout != None: + post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ + postdata=parameters, timeout=timeout, \ + httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) + else: + post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ + postdata=parameters, \ + httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) + + + # Parse the result to check for success. Throw several exceptions to + # ensure the XML we're reading makes sense. 
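+  # The checks below expect a response document shaped roughly like:
+  #   <HashTableService>
+  #     <status>0</status>
+  #     <description>OK</description>
+  #     <values><value>...</value></values>
+  #   </HashTableService>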
+ xmltree = xmlparse_parse(post_result) + + if xmltree.tag_name != "HashTableService": + raise DORadvertise_XMLError( + "Root node error. Expected: 'HashTableService', " + + "got: '%s'" % xmltree.tag_name) + + if xmltree.children is None: + raise DORadvertise_XMLError("Root node contains no children nodes.") + + # We expect to get an error code, an error string, and possibly some + # values from the server. + error_msg = None + error = None + values = None + + numxmlchildren = len(xmltree.children) + if numxmlchildren not in [2, 3]: + raise DORadvertise_XMLError("Root XML node contains inappropriate " + \ + "number of child nodes.") + + for xmlchild in xmltree.children: + # Read the numeric error code. + if xmlchild.tag_name == "status" and xmlchild.content is not None: + if error is not None: + raise DORadvertise_XMLError("XML contains multiple status tags") + error = int(xmlchild.content.strip()) + + # String error message (description:status as strerror:errno). + elif xmlchild.tag_name == "description": + if error_msg is not None: + raise DORadvertise_XMLError("XML contains multiple description tags") + error_msg = xmlchild.content + + # We found a tag. Let's try and get some values. + elif xmlchild.tag_name == "values" and xmlchild.children is not None: + if values is not None: + raise DORadvertise_XMLError("XML contains multiple values tags") + + values = [] + for valuenode in xmlchild.children: + if valuenode.tag_name != "value": + raise DORadvertise_XMLError( + "Child tag of ; expected: '', got: '<%s>'" % \ + valuenode.tag_name) + + content = valuenode.content + if content is None: + content = "" + + values.append(content) + + # Check for tags we do not expect. + elif xmlchild.tag_name not in ("status", "description", "values"): + raise DORadvertise_XMLError("Unexpected tag '" + \ + str(xmlchild.tag_name) + "' while parsing response.") + + if error is not 0: + raise DORadvertise_BadRequest(error, error_msg) + + # This happens when the server returns + if values is None: + return [] + + return values + + +### Automatically generated by repyhelper.py ### /home/integrationtester/cron_tests/dorputget/DORadvertise.repy diff --git a/DORadvertise_repy.pyc b/DORadvertise_repy.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6280023401ad2a8691242f5f4d6fbfd0be6d3253 GIT binary patch literal 6028 zcmeHL-EJGl6`oy6v?NoZVmq?)gLa(O5gk}!iUuy=2tpm(O$;P*uu_{=KtQZ^hvZoM zL(dE?36OeG`@;7Hdei$pLLZWoPnGn0w3aOO`VlNN@^&27!#a<{>C>}P%6Cw8I zL^>z-=EVT*CXb&J>AZM?1vCkr=JTC%+({S2-hxOMg<2H-K%`6JsUT{<+glRp`FgZ9 z89h&&(`E4#k}Qk#4eqwZQ^?}mEBvZW<2#)^`jNm)2`^6)V9b2L_bZ%snGg>`Xesme5M|n1ijjcvLs^;D5b_c4^ z8)<3OKwD>yH%c?;?x@(gOnr85`+oQH*baB&{Y<&Hu;}iCC{7e~Ge8jis{4NN4GmHbJdOahuRL+K{px3#Xwms zFSIL7&n8BXT=#q38MTq6Hb%!0N%A4Sb`Fo`Tr+T&(U_U~X`Dvt(O6k`R>w5?c^xAP z;FNm}Qg%pW3p{>M*)LJ6)bKacj}wJfst2)yat{?q4dcfODo&M*3%q+AXF7$sSU^&4 zLR+HmJid4EwKwrN3B(CBj7G?fKsb&@3_@l%M-vc!c-s%f>XVQo*Y9E5z5@c#f`nRX zPO@{&UcV*RuGzH?F>>@!#=!oIz6IOsV!*wLt5r7FFcXYSLSmNp2GYM(SwAATh=|5( zv-x=t|Ag34EQ??fpANB)_(2IW}qJfzQCg* z0qn%|votaVryR<%FA20|M;E{^pnGbs({4w~SYR*&)s`jRT;KBI%o zv-KyzS-!y7Z^e$d*^w$z2^!3<1oM~?d0+LR6WNb-h8UXRtYVY-J#J3Is?vOY zywI8O6`eDDFi4H9!UTAW-~|{LP`0+knuCckiH;Sye@yTF;l%7_5U@UDW=YvmSy(b( zjq#HMASsPe^g69X$5s!Av70>wB;m>QAe+K$&BoIJ3qrtz5fG3EkSXCOi4H+a3qDCuXmEN8Zr@)qDLuPN_ z9vL1yZ0)^p0+_FWBS@GIv2-}pneuca)_O&9yDTs)B|~9c#U7t`Ntx$ykuofTDI+R- zJWtJZUy;Z1)GFWD_OR7LZ1XCd8Jr;WW)xe~B}gIejMvc$dJ&~6oYDAbi7rx~U=Q%% zdT@t0w^IaswcvL)m8rGCxJQ5`-oFy8hAW{?(dKiF_Ar6QpP{3X;>|!KFwq}n1dyi< 
z%H)89fJTPNV5dXSZvXruB!rzfVT1K5O!9#9-%=oN<^Kf+XCV*>^8)P4<0G)OXAuN1 zbUx1I6iHaTSt3*5a%B~K6e+R_{Tk4dILjtzTY1x2j#Eyr0<$2*{XQp%fW1WbwMuU= zGNq~?k242Q0yYAA{h3pZ&TOKR)B`dH8X+46zRdK#iOsR>{QRRDVXLtIKTN)EnDm$i zOKr|T9igX>pvuhwl4P%!rMx&G`1}SqQjlh4c`zPDZ8RCJ82PH`V5H&gKxWWWa14?^6wMR_h%_#-H9J`sWy1=V`64EQL1F%MS(*7$Gxi}?)as_m7 z^hf5~6nDS*jW~s5i@d8#Y|el2nzyRK)rl%;(t5$N$$S(AwLqO&7Xlfi5(HyIf`KiJ_l{ z*vSrxYMj>qvF-^|3)t zos_9cEydy;s+1INefM~z97W@#c(H>zflid|VlXOEC@8R~%I8pypz451ai{$Lg0&mc z(uHtPI)CG|*JdbCZ6?+Q6(3}Rv@12Z2@q|&p(*;1BOo9rYpA@zoG0safvuTxWcW&9 zS22Bh^fASX9OII8u^W4h5UB~39A}swA({3Qq>BfZlPL(o1qtZQegN7pUiI88zmzc0 zpYW{x7q6;P0OG~joc#q>-616q5xQ<{`YBtVR0SATu^-%mX1!83ui33Nq)yNine5kv zFV^6uKvdT3+ZbH)IW6BBD2J}o^9@bw7>uXt=1VuNO)rKj!=>0!rIl}V!x*K4x){qU%8w}kQJVF3Mqb-T zHDl6=4T6vjY~sRZxJ34F$LjeCZp4^C2Qzz0M3!csjL6?3!bYE2pz+$Y&!TSn%I6lz dVu|k(G}yui+HNlg_{=xwSC>}Xt@ZW?-vOOrHJJba literal 0 HcmV?d00001 diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy index 0881b1e..41b4e74 100644 --- a/centralizedadvertise.repy +++ b/centralizedadvertise.repy @@ -16,13 +16,16 @@ servername = "satya.cs.washington.edu" serverport = 10101 def centralizedadvertise_announce(key, value, ttlval): - +#added socket time out exception sockobj = timeout_openconn(servername,serverport, timeout=10) try: session_sendmessage(sockobj, "PUT|"+str(key)+"|"+str(value)+"|"+str(ttlval)) response = session_recvmessage(sockobj) if response != 'OK': raise Exception, "Centralized announce failed '"+response+"'" + except socket.timeout: + print "Socket timed out '"+response+"'" + finally: # BUG: This raises an error right now if the call times out ( #260 ) # This isn't a big problem, but it is the "wrong" exception diff --git a/centralizedadvertise.repy~ b/centralizedadvertise.repy~ new file mode 100644 index 0000000..9db33f4 --- /dev/null +++ b/centralizedadvertise.repy~ @@ -0,0 +1,55 @@ +""" +Author: Justin Cappos + +Start Date: July 8, 2008 + +Description: +Advertisements to a central server (similar to openDHT) + + +""" + +include session.repy +# I'll use socket timeout to prevent hanging when it takes a long time... 
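+# (sockettimeout.repy supplies the timeout_openconn() used below.)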
+include sockettimeout.repy +servername = "satya.cs.washington.edu" +serverport = 10101 + +def centralizedadvertise_announce(key, value, ttlval): +#added socket time out exception and commented previous code + sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, "PUT|"+str(key)+"|"+str(value)+"|"+str(ttlval)) + response = session_recvmessage(sockobj) + if response != 'OK': + raise Exception, "Centralized announce failed '"+response+"'" + except socket.timeout: + print "Socket timed out '"+response+"'" + + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + return True + + + + +def centralizedadvertise_lookup(key, maxvals=100): + sockobj = timeout_openconn(servername,serverport, timeout=10) + try: + session_sendmessage(sockobj, "GET|"+str(key)+"|"+str(maxvals)) + recvdata = session_recvmessage(sockobj) + # worked + if recvdata.endswith('OK'): + return recvdata[:-len('OK')].split(',') + raise Exception, "Centralized lookup failed" + + finally: + # BUG: This raises an error right now if the call times out ( #260 ) + # This isn't a big problem, but it is the "wrong" exception + sockobj.close() + + + diff --git a/dorputget_new.py b/dorputget_new.py new file mode 100644 index 0000000..c110155 --- /dev/null +++ b/dorputget_new.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +""" + + dorputget.py + + + December 17, 2008 + + + ivan@cs.washington.edu + Ivan Beschastnikh + + + Attempt to put a (k,v) into DO registry and then get it back. On error + send an email to some folks. + + + Modify the following global var params to have this script functional: + - notify_list, a list of strings with emails denoting who will be + emailed when something goes wrong + + - GMAIL_USER and GMAIL_PWD environment variables: the username and + password of the gmail user who will be sending the email to the + emails in the notify_list (see crontab line below). + + This script takes no arguments. A typical use of this script is to + have it run periodically using something like the following crontab line: + 7 * * * * export GMAIL_USER='..' && export GMAIL_PWD='..' && /usr/bin/python /home/seattle/dorputget.py > /home/seattle/cron_log.dorputget +""" + +import time +import os +import socket +import sys +import traceback +import threading +import random + +import send_gmail +import integrationtestlib +import repyhelper + +repyhelper.translate_and_import("/home/integrationtester/cron_tests/dorputget/DORadvertise.repy") + +# Armon: This is to replace using the time command with getruntime +import nonportable + +# event for communicating when the lookup is done or timedout +lookup_done_event = threading.Event() + + + +def lookup_timedout(): + """ + + Waits for lookup_done_event and notifies the folks on the + notify_list (global var) of the lookup timeout. + + + None. + + + None. + + + Sends an email to the notify_list folks + + + None. + """ + integrationtestlib.log("in lookup_timedout()") + notify_msg = "DOR lookup failed -- lookup_timedout() fired after 60 seconds." 
+ subject = "DOR with repy test failed" + + # wait for the event to be set, timeout after 30 minutes + wait_time = 1800 + tstamp_before_wait = nonportable.getruntime() + lookup_done_event.wait(wait_time) + tstamp_after_wait = nonportable.getruntime() + + t_waited = tstamp_after_wait - tstamp_before_wait + if abs(wait_time - t_waited) < 5: + notify_msg += " And lookup stalled for over 30 minutes (max timeout value)." + else: + notify_msg += " And lookup stalled for " + str(t_waited) + " seconds" + + integrationtestlib.notify(notify_msg,subject ) + return + +def main(): + """ + + Program's main. + + + None. + + + All exceptions are caught. + + + None. + + + None. + """ + # setup the gmail user/password to use when sending email + success,explanation_str = send_gmail.init_gmail() + if not success: + integrationtestlib.log(explanation_str) + sys.exit(0) + + integrationtestlib.notify_list.append("cemeyer@u.washington.edu") + + key = str(random.randint(4,2**30)) + value = str(random.randint(4,2**30)) + ttlval = 60 + subject = "DOR with repy test failed" + + + # put(key,value) with ttlval into DOR + integrationtestlib.log("calling DORadvertise_announce(key: " + str(key) + ", val: " + str(value) + ", ttl: " + str(ttlval) + ")") + try: + DORadvertise_announce(key, value, ttlval) + except: + message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_announce(). " + message = message + "Anouncing with key: " + key + ", value: " + value + ", ttlval: " + str(ttlval) + integrationtestlib.handle_exception("DORadvertise_announce() failed", subject) + print message + sys.exit(0) + + # a 60 second timer to email the notify_list on slow lookups + lookup_timedout_timer = threading.Timer(60, lookup_timedout) + # start the lookup timer + lookup_timedout_timer.start() + + # get(key) from DOR + integrationtestlib.log("calling DORadvertise_lookup(key: " + str(key) + ")") + try: + ret_value = DORadvertise_lookup(key) + # TODO: check the return value as well + # ret_value = int(ret_value[0]) + except: + message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_lookup(). " + message = message + "Looking up with key: " + key + integrationtestlib.handle_exception(message, subject) + sys.exit(0) + + lookup_timedout_timer.cancel() + lookup_done_event.set() + return + +if __name__ == "__main__": + main() + diff --git a/harshexit.py b/harshexit.py new file mode 100644 index 0000000..76bf867 --- /dev/null +++ b/harshexit.py @@ -0,0 +1,174 @@ + +# harshexit module -- Should be renamed, but I'm not sure what to. +# Provides these functions: +# portablekill: kill a function by pid +# harshexit: die, and do some things depending on the error code +# init_ostype: sets the module globals ostype and osrealtype + +# used to get information about the system we're running on +import platform +import os +import sys + +# needed for signal numbers +import signal + +# needed for changing polling constants on the Nokia N800 +import repy_constants + +# Needed for kill_process; This will fail on non-windows systems +try: + import windows_api +except: + windows_api = None + +# need for status retrieval +import statusstorage + +# This prevents writes to the nanny's status information after we want to stop +statuslock = statusstorage.statuslock + + + +ostype = None +osrealtype = None + + +# this indicates if we are exiting. 
Wrapping in a list to prevent needing a +# global (the purpose of this is described below) +statusexiting = [False] + + + +class UnsupportedSystemException(Exception): + pass + + + +def portablekill(pid): + global ostype + global osrealtype + + if ostype == None: + init_ostype() + + if ostype == 'Linux' or ostype == 'Darwin': + try: + os.kill(pid, signal.SIGTERM) + except: + pass + + try: + os.kill(pid, signal.SIGKILL) + except: + pass + + elif ostype == 'Windows' or ostype == 'WindowsCE': + # Use new api + windows_api.kill_process(pid) + + else: + raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + + + +# exit all threads +def harshexit(val): + global ostype + global osrealtype + + if ostype == None: + init_ostype() + + # The problem is that there can be multiple calls to harshexit before we + # stop. For example, a signal (like we may send to kill) may trigger a + # call. As a result, we block all other status writers the first time this + # is called, but don't later on... + if not statusexiting[0]: + + # do this once (now) + statusexiting[0] = True + + # prevent concurrent writes to status info (acquire the lock to stop others, + # but do not block... + statuslock.acquire() + + # we are stopped by the stop file watcher, not terminated through another + # mechanism + if val == 4: + # we were stopped by another thread. Let's exit + pass + + # Special Termination signal to notify the NM of excessive threads + elif val == 56: + statusstorage.write_status("ThreadErr") + + elif val == 44: + statusstorage.write_status("Stopped") + + else: + # generic error, normal exit, or exitall in the user code... + statusstorage.write_status("Terminated") + + # We intentionally do not release the lock. We don't want anyone else + # writing over our status information (we're killing them). + + + if ostype == 'Linux': + # The Nokia N800 refuses to exit on os._exit() by a thread. I'm going to + # signal our pid with SIGTERM (or SIGKILL if needed) + portablekill(os.getpid()) +# os._exit(val) + elif ostype == 'Darwin': + os._exit(val) + elif ostype == 'Windows' or ostype == 'WindowsCE': + # stderr is not automatically flushed in Windows... + sys.stderr.flush() + os._exit(val) + else: + raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + + + +# Figure out the OS type +def init_ostype(): + global ostype + global osrealtype + + # Detect whether or not it is Windows CE/Mobile + if os.name == 'ce': + ostype = 'WindowsCE' + return + + # figure out what sort of witch we are... + osrealtype = platform.system() + + # The Nokia N800 (and N900) uses the ARM architecture, + # and we change the constants on it to make disk checks happen less often + if platform.machine().startswith('armv'): + if osrealtype == 'Linux' or osrealtype == 'Darwin' or osrealtype == 'FreeBSD': + repy_constants.CPU_POLLING_FREQ_LINUX = repy_constants.CPU_POLLING_FREQ_WINCE; + repy_constants.RESOURCE_POLLING_FREQ_LINUX = repy_constants.RESOURCE_POLLING_FREQ_WINCE; + + if osrealtype == 'Linux' or osrealtype == 'Windows' or osrealtype == 'Darwin': + ostype = osrealtype + return + + # workaround for a Vista bug... + if osrealtype == 'Microsoft': + ostype = 'Windows' + return + + if osrealtype == 'FreeBSD': + ostype = 'Linux' + return + + if osrealtype.startswith('CYGWIN'): + # I do this because ps doesn't do memory info... They'll need to add + # pywin to their copy of cygwin... I wonder if I should detect its + # abscence and tell them (but continue)? 
+ ostype = 'Windows' + return + + ostype = 'Unknown' + diff --git a/harshexit.pyc b/harshexit.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09de775c10e2605810893a9762a8ab7f3701a7ee GIT binary patch literal 2776 zcmcJR&u<$=6vyA}uI<=yezZW6LJt8&s`;UYBZ?|S>$+7$>Xvn)v_7!f?oQ%$>|JMP z+%%#min#Po;KG?R2P7_>_!BttFL2<5IKcP4^^dA*4@kwyKEIiHGxKKN=e@Q2=TdF= z-L3tICLb5ypW!h-L&W%3G$88EbVXe??RTi_Om&yK?o{`v>(Tk@v{|BVi4M~xQc7Vt zG*U(PCu>)zTOnPc{sMIuc(<@BvPOE5j$vJ=*de_{N7&Y-Z-|}}J?Jh|>~W*JLUBnH zY*!ZL0?+R>D)2w~2cNcm8|p!7#-mY|TOD`Kjn%`}<4BKelBKhwE12EDV-_GZ)>h?NkBPqw38m>z2r46`(` zSss`uPeyk8g=R0zVf{F*W(JUe=W>(8ho8?_&~Imq~pi74DDu^pCzf~ z*c~QmoShk4o$Jk(c_EaWMq;^X&qsP^^NQi9T-j`dgCsONo0c!@{YD*9R?iIESr6@Y zmTEW#L=cT&GQigimB&w#!9eDioVLIPpdNgtBloW6t-oo#$)Dp z(HF(;Z?a3&U#9+wqzo@Op9FMKPU>VY0NJGpy32EPS8z-1gzq%r3rjwM5K#Af2y)!Y z^W-D$VvajD8;x`<4D9QCn4}?Ukk8@wd9u$L;4}RH4Q`(oZ(QWboJl_CiO>AwTv*G7 zc5JwiBeh?xA613r4?Ob8BABt2?e+ zSE~*lZ?#-^-(XE`I7{e@FhM?i1WALxpdbzUc=<><%`Y+DVNt@0N(u@?R5?^t&E^_cr)tYGPmX|u|2ztX0Au)L6oKFPOY(P=xW|O2=3k8+uLp54sQ9aF9P_{ zY`uI4MhExpJDBCSI(HBJX6t2BUp+4K=R}M4dWyQ_^|yTB4)u;xOV!my zXRBzl1kEg0JeEay;8G-4?QgOsH_vYcf0c#Q4SqlP{BA933FoQg?yYcOw7o6(=s2s@__udbzsuFI34s A#{d8T literal 0 HcmV?d00001 diff --git a/integrationtestlib.pyc b/integrationtestlib.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d48d7602f2af8e5ba7044c1a653cf57c2b11eaf GIT binary patch literal 3181 zcmcIm&u9U-prdfZ{ED`n~nZjibubF`m{@v zUx@c@JVv5PiMG)Ol!i16Xd8l;L)s3fazxwFRIbr>Z7R>v_8g@V{RAiTl-8V3r*zH< z3+!{NInPV|84$&rPfFb{WiDDWSGQszhPt1~B25Zyv)*`ZG)7;%xn*T(RmujBszQky zH|=0m+_--IYd3tSvIAZ6=wn^{Se4>&HIR8OQ}pEAli%)|wS8#@S<$z;SW{^g!|ju* z9BJd$eL7HL#KL=vs7?pPs37O>jIDFS#5GIW*2dF~@>Y$>~{ zBfGmkZ0y1Zwid^m-x=AT><(+=Ki8!a!)zD6@#>Va9E+YVoujO<3P)ixT?ng;HLIdB zl^hPo7{=UbHnM!A`7p44RtR0D3iCB=kt<}PR~23MA%~gGl!>!{@N0E(*u)Q4+Fd^W z1{zDW6VS0Mw#Fe*sCWqB0g*(mliQi&xr#v zHr17l*~Oi*U*!rDxkOMBroZZQrw%Nej@YZ1THinDsu6EtV&2k)LN?)ND@&EQ-|MOF z)YUAWVTQ7tt?JF&%2ws8UW72bIk6#HO-_Zaag*iB&Ot!pa@FKCS++u)O}xl8<`Jaz zfzH*s?BF~GYInUmkVRja1T~&nT_&bmW+S`)ntJMB`CM`CfR$SSRecp+4F^HX7lJN_ zRL8>ry^F{Efo6REFoK;+6WphQQfz{I3>7=yXWJE}BO1bPxkhM9S5`SHwC_2<||P1bpJ2?zPU zDimM|z;S@@Qk)#W@s9tiAkSj&Rk2$zH4U_}8FcbxdV5x`mhtLxk;pVHkqNiIgjvCI z#g>MPXDK!@UOofNd4M=ktI2CvNBg?FtE}au_Z3*%*aFON&!6a>G4~`$5@W!Ojl+m6 z`+H8%BLI03Bc+llqt1Bf3_;{>h0<6?0WT&;FizX-2PFY#(2_n=Xp^hlgAWT?)=bse z0kG`?pllo=F~k~U2e`%Q?f}?43h=TO zEIe2*K6p3*bDa_FAPZ)2K;|)YIpqEjF;@WQfHqG6HbDRj4(u=nZbOIOhAAf)gOuQHyT?`5|uHp~AIM2sI^l59w5S z&dm0}-lznRlh^JG57^gDneF=;H?u~WJARO2hSigG-Wj#uYWJA|Mwo`WCR+S8gxge% zlpf)uWG|BwTnp#_?KceQu)sB)Ih1le-7iaBZivr~ zi0@6)qzm8ibo$k?(Q*XpFxJK-YP;);0Xq9Kad35aEKuETGiW>)xEtC54*f}1^t3be z*Lh+)ZYW6{A0+L|nAhaWZLe^1f!#O&a-4}BkCdYo<3M?ERsZX%2TbijgznzcOB+&s;%6E9ZdJkAlVELa-Qp7_Q*G6s$zcVbi6P zNh(QF-A$7AGUWCr+~A=$54pVhbKp$1uc0~FiIe{CZwNk&&0MF|P~Ex++3^16!?U0q L_ZKZ*TD`PiBHt*yVx_TZ0|b{Ddrh2JOms!Jq3 zKnE!UKL;U$4h}}+9CUKyu>~D#JkCQW555JW06mnYdL6DO5{O`#NIqi;*-RSc+-o-md$yPbX_GptwV7f>(#-PZEv=; zmuTVB^0QP5{^d8H^RL#|zs03$s{3(5EBmqhB~AE#+V`T!^N|w~Cwh{uUU;e>$P*pQ zRpF<0@HhUNdLHdJoLViGQgn3?%N;KWZw`v#>Qi!)d=(fO1E*ENF;sF$G_hS*qSvmg zZ8M2tKMip@l0p`kk=nu0T0f4&ns?j}R9~E}b^D$?5z0ju105%>>L$TJue~+BbrudO zeSohbABwC?UQXki$G3*B`WiWy#G%RK5R+)Z5Yv~3AttT>!vf`JScI!J&;X|x2vTNf z4qoM8I0e^JaB9IdgXR9$?@LRYgGz-_o2~!v06f37}j9pgY8l5VEe~;$dKdr;CWV}L)_ecBYP$^m+Th8{+ zoBcok2`@8O7T&OuF&`KynoOIb^xwmZT%^PM9SPRW=3$x)Vr9ya^Miqqil+na@$OBj za2->nuJ#fkWbBJoDKz(_&vjo&u7vaz%11&e??l+7vwqnX1FAAb#mafRpTrj^B)J%` z@Sw+2=^clH)Mv&rOGZa4@CC2;dRVTRmS!vks#A05`P=>kGaq1jsmfcUe!>o+VtX2E 
zeSgAr+d^sLO-*>F=s(WdP!({Y*{D6|rs~SJVH~yU4%bwFT$PDtMo<&qFcQxW8ciP1 zW<_GQQF`b!zTa={j#TlDfL4t3Vby7KRmACcM~!ybZdRzNFtNBEchAJgzv_Z8)B*})I|Cok}PMg z(g{HZ>DfDIwawu0z-gO3jqQ4~Z6JWorCHstn+dw|!c6EWYkSpZv;M*;rAD3ghF>Cb+UwxsLzl?@t5PMn`lT1qattM-s zzmtaHvAmX~{1Ee~U+YHdvOu1DVSpB+IF+ce$Gzkv zjfB+y)n{bmh#Gp-!A@$teDk*A7lH0mSC18rQDMTk2*QvbV~_X67$FETOkl*rVi^aB z7CEn9deI;h8{``2gDbr7?lSy%PlRF2>*pf5!oXKn9;19f_k?+Uz)96?vuZOJE zr?_qc+xU0&n^|LQnU-xOMqsD5+wKVpk8PAwoQ=4P3BtR-vui|I$R>>uK9VSWGr literal 0 HcmV?d00001 diff --git a/nonportable.py b/nonportable.py new file mode 100755 index 0000000..2c0a9d3 --- /dev/null +++ b/nonportable.py @@ -0,0 +1,1028 @@ +""" +Author: Justin Cappos + +Start Date: July 1st, 2008 + +Description: +Handles exiting and killing all threads, tracking CPU / Mem usage, etc. + + +""" + + +import threading +import os +import time + +# needed for sys.stderr and windows Popen hackery +import sys + +# needed for signal numbers +import signal + +# needed for harshexit +import harshexit + +# print useful info when exiting... +import tracebackrepy + +# used to query status, etc. +# This may fail on Windows CE +try: + import subprocess + mobile_no_subprocess = False +except ImportError: + # Set flag to avoid using subprocess + mobile_no_subprocess = True + + +# used for socket.error +import socket + +# need for status retrieval +import statusstorage + +# Get constants +import repy_constants + +# Get access to the status interface so we can start it +import nmstatusinterface + +# This gives us our restrictions information +import nanny_resource_limits + +# This is used for IPC +import marshal + +# This will fail on non-windows systems +try: + import windows_api as windows_api +except: + windows_api = None + +# Armon: This is a place holder for the module that will be imported later +os_api = None + +# Armon: See additional imports at the bottom of the file + +class UnsupportedSystemException(Exception): + pass + + + +################### Publicly visible functions ####################### + +# check the disk space used by a dir. +def compute_disk_use(dirname): + # Convert path to absolute + dirname = os.path.abspath(dirname) + + diskused = 0 + + for filename in os.listdir(dirname): + try: + diskused = diskused + os.path.getsize(os.path.join(dirname, filename)) + except IOError: # They likely deleted the file in the meantime... + pass + except OSError: # They likely deleted the file in the meantime... + pass + + # charge an extra 4K for each file to prevent lots of little files from + # using up the disk. I'm doing this outside of the except clause in + # the failure to get the size wasn't related to deletion + diskused = diskused + 4096 + + return diskused + + +# prepare a socket so it behaves how we want +def preparesocket(socketobject): + + if ostype == 'Windows': + # we need to set a timeout because on rare occasions Windows will block + # on recvmess with a bad socket. This prevents it from locking the system. + # We use select, so the timeout should never be actually used. + + # The actual value doesn't seem to matter, so I'll use 100 years + socketobject.settimeout(60*60*24*365*100) + + elif ostype == 'Linux' or ostype == 'Darwin': + # Linux seems not to care if we set the timeout, Mac goes nuts and refuses + # to let you send from a socket you're receiving on (why?) 
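+    # Leaving the socket at its default blocking behavior is deliberate here.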
+ pass + + elif ostype == "WindowsCE": + # No known issues, so just go + pass + + else: + raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + + +# Armon: Also launches the nmstatusinterface thread. +# This will result in an internal thread on Windows +# and a thread on the external process for *NIX +def monitor_cpu_disk_and_mem(): + if ostype == 'Linux' or ostype == 'Darwin': + # Startup a CPU monitoring thread/process + do_forked_resource_monitor() + + elif ostype == 'Windows' or ostype == 'WindowsCE': + # Now we set up a cpu nanny... + # Use an external CPU monitor for WinCE + if ostype == 'WindowsCE': + nannypath = "\"" + repy_constants.PATH_SEATTLE_INSTALL + 'win_cpu_nanny.py' + "\"" + cmdline = str(os.getpid())+" "+str(nanny_resource_limits.resource_limit("cpu"))+" "+str(repy_constants.CPU_POLLING_FREQ_WINCE) + windows_api.launch_python_script(nannypath, cmdline) + else: + WinCPUNannyThread().start() + + # Launch mem./disk resource nanny + WindowsNannyThread().start() + + # Start the nmstatusinterface. Windows means repy isn't run in an external + # process, so pass None instead of a process id. + nmstatusinterface.launch(None) + else: + raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" + + + + +# Elapsed time +elapsedtime = 0 + +# Store the uptime of the system when we first get loaded +starttime = 0 +last_uptime = 0 + +# Timestamp from our starting point +last_timestamp = time.time() + +# This is our uptime granularity +granularity = 1 + +# This ensures only one thread calling getruntime at any given time +runtimelock = threading.Lock() + +def getruntime(): + """ + + Return the amount of time the program has been running. This is in + wall clock time. This function is not guaranteed to always return + increasing values due to NTP, etc. + + + None + + + None. + + + None + + + By default this will have the same granularity as the system clock. However, if time + goes backward due to NTP or other issues, getruntime falls back to system uptime. + This has much lower granularity, and varies by each system. + + + The elapsed time as float + """ + global starttime, last_uptime, last_timestamp, elapsedtime, granularity, runtimelock + + # Get the lock + runtimelock.acquire() + + # Check if Linux or BSD/Mac + if ostype in ["Linux", "Darwin"]: + uptime = os_api.get_system_uptime() + + # Check if time is going backward + if uptime < last_uptime: + # If the difference is less than 1 second, that is okay, since + # The boot time is only precise to 1 second + if (last_uptime - uptime) > 1: + raise EnvironmentError, "Uptime is going backwards!" + else: + # Use the last uptime + uptime = last_uptime + + # No change in uptime + diff_uptime = 0 + else: + # Current uptime, minus the last uptime + diff_uptime = uptime - last_uptime + + # Update last uptime + last_uptime = uptime + + # Check for windows + elif ostype in ["Windows", "WindowsCE"]: + # Release the lock + runtimelock.release() + + # Time.clock returns elapsedtime since the first call to it, so this works for us + return time.clock() + + # Who knows... + else: + raise EnvironmentError, "Unsupported Platform!" + + # Current uptime minus start time + runtime = uptime - starttime + + # Get runtime from time.time + current_time = time.time() + + # Current time, minus the last time + diff_time = current_time - last_timestamp + + # Update the last_timestamp + last_timestamp = current_time + + # Is time going backward? 
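+  # (time.time() moving backwards usually means NTP or an administrator
+  # stepped the clock; in that case trust the uptime delta instead.)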
+  if diff_time < 0.0:
+    # Add in the change in uptime
+    elapsedtime += diff_uptime
+
+  # Let's check if time.time is too skewed
+  else:
+    skew = abs(elapsedtime + diff_time - runtime)
+
+    # If the skew is too great, use uptime instead of time.time()
+    if skew < granularity:
+      elapsedtime += diff_time
+    else:
+      elapsedtime += diff_uptime
+
+  # Release the lock
+  runtimelock.release()
+
+  # Return the new elapsedtime
+  return elapsedtime
+
+
+# This lock is used to serialize calls to get_resources
+get_resources_lock = threading.Lock()
+
+# These are the resources we expose in get_resources
+exposed_resources = set(["cpu","memory","diskused","events",
+                         "filewrite","fileread","filesopened",
+                         "insockets","outsockets","netsend",
+                         "netrecv","loopsend","looprecv",
+                         "lograte","random","messport","connport"])
+
+# These are the resources whose usage we don't flatten using len().
+# For example, instead of giving the set of threads, we flatten that
+# into N, the number of threads.
+flatten_exempt_resources = set(["connport","messport"])
+
+# Cache the disk used from the external process
+cached_disk_used = 0L
+
+# This array holds the times that repy was stopped.
+# It is an array of tuples, of the form (time, amount)
+# where time is when repy was stopped (from getruntime()) and amount
+# is the stop time in seconds. The last process_stopped_max_entries are retained.
+process_stopped_timeline = []
+process_stopped_max_entries = 100
+
+# Method to expose resource limits and usage
+def get_resources():
+  """
+  <Purpose>
+    Returns the resource utilization limits as well
+    as the current resource utilization.
+
+  <Arguments>
+    None.
+
+  <Returns>
+    A tuple of dictionaries and an array (limits, usage, stoptimes).
+
+    Limits is the dictionary which maps the resource name
+    to its maximum limit.
+
+    Usage is the dictionary which maps the resource name
+    to its current usage.
+
+    Stoptimes is an array of tuples with the times at which the Repy process
+    was stopped and for how long, due to CPU over-use.
+    Each entry in the array is a tuple (TOS, Sleep Time) where TOS is the
+    time of stop (relative to getruntime()) and Sleep Time is how long the
+    repy process was suspended.
+
+    The stop times array holds a fixed number of the last stop times.
+    Currently, it holds the last 100 stop times.
+  
+ """ + # Acquire the lock + get_resources_lock.acquire() + + # Construct the dictionaries as copies from nanny + limits = nanny_resource_limits.resource_restriction_table.copy() + usage = nanny_resource_limits.resource_consumption_table.copy() + + # These are the type we need to copy or flatten + check_types = set([list,dict,set]) + + # Check the limits dictionary for bad keys + for resource in limits.keys(): + # Remove any resources we should not expose + if resource not in exposed_resources: + del limits[resource] + + # Check the type + if type(limits[resource]) in check_types: + # Copy the data structure + limits[resource] = limits[resource].copy() + + # Check the usage dictionary + for resource in usage.keys(): + # Remove any resources that are not exposed + if resource not in exposed_resources: + del usage[resource] + + # Check the type, copy any data structures + # Flatten any structures using len() other than + # "connport" and "messport" + if type(usage[resource]) in check_types: + # Check if they are exempt from flattening, store a shallow copy + if resource in flatten_exempt_resources: + usage[resource] = usage[resource].copy() + + # Store the size of the data set + else: + usage[resource] = len(usage[resource]) + + + + # Calculate all the usage's + pid = os.getpid() + + # Get CPU and memory, this is thread specific + if ostype in ["Linux", "Darwin"]: + + # Get CPU first, then memory + usage["cpu"] = os_api.get_process_cpu_time(pid) + + # This uses the cached PID data from the CPU check + usage["memory"] = os_api.get_process_rss() + + # Get the thread specific CPU usage + usage["threadcpu"] = os_api.get_current_thread_cpu_time() + + + # Windows Specific versions + elif ostype in ["Windows","WindowsCE"]: + + # Get the CPU time + usage["cpu"] = windows_api.get_process_cpu_time(pid) + + # Get the memory, use the resident set size + usage["memory"] = windows_api.process_memory_info(pid)['WorkingSetSize'] + + # Get thread-level CPU + usage["threadcpu"] = windows_api.get_current_thread_cpu_time() + + # Unknown OS + else: + raise EnvironmentError("Unsupported Platform!") + + # Use the cached disk used amount + usage["diskused"] = cached_disk_used + + # Release the lock + get_resources_lock.release() + + # Copy the stop times + stoptimes = process_stopped_timeline[:] + + # Return the dictionaries and the stoptimes + return (limits,usage,stoptimes) + + +################### Windows specific functions ####################### + +class WindowsNannyThread(threading.Thread): + + def __init__(self): + threading.Thread.__init__(self,name="NannyThread") + + def run(self): + # Calculate how often disk should be checked + if ostype == "WindowsCE": + disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WINCE / repy_constants.CPU_POLLING_FREQ_WINCE) + else: + disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WIN / repy_constants.CPU_POLLING_FREQ_WIN) + current_interval = 0 # What cycle are we on + + # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem + windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL) + + # need my pid to get a process handle... + mypid = os.getpid() + + # run forever (only exit if an error occurs) + while True: + try: + # Check memory use, get the WorkingSetSize or RSS + memused = windows_api.process_memory_info(mypid)['WorkingSetSize'] + + if memused > nanny_resource_limits.resource_limit("memory"): + # We will be killed by the other thread... 
+ raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny_resource_limits.resource_limit("memory"))+"'" + + # Increment the interval we are on + current_interval += 1 + + # Check if we should check the disk + if (current_interval % disk_interval) == 0: + # Check diskused + diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR) + if diskused > nanny_resource_limits.resource_limit("diskused"): + raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny_resource_limits.resource_limit("diskused"))+"'" + + if ostype == 'WindowsCE': + time.sleep(repy_constants.CPU_POLLING_FREQ_WINCE) + else: + time.sleep(repy_constants.CPU_POLLING_FREQ_WIN) + + except windows_api.DeadProcess: + # Process may be dead, or die while checking memory use + # In any case, there is no reason to continue running, just exit + harshexit.harshexit(99) + + except: + tracebackrepy.handle_exception() + print >> sys.stderr, "Nanny died! Trying to kill everything else" + harshexit.harshexit(20) + + +# Windows specific CPU Nanny Stuff +winlastcpuinfo = [0,0] + +# Enfoces CPU limit on Windows and Windows CE +def win_check_cpu_use(cpulim, pid): + global winlastcpuinfo + + # get use information and time... + now = getruntime() + + # Get the total cpu time + usertime = windows_api.get_process_cpu_time(pid) + + useinfo = [usertime, now] + + # get the previous time and cpu so we can compute the percentage + oldusertime = winlastcpuinfo[0] + oldnow = winlastcpuinfo[1] + + if winlastcpuinfo == [0,0]: + winlastcpuinfo = useinfo + # give them a free pass if it's their first time... + return 0 + + # save this data for next time... + winlastcpuinfo = useinfo + + # Get the elapsed time... + elapsedtime = now - oldnow + + # This is a problem + if elapsedtime == 0: + return -1 # Error condition + + # percent used is the amount of change divided by the time... 
+  percentused = (usertime - oldusertime) / elapsedtime
+
+  # Calculate amount of time to sleep for
+  stoptime = nanny_resource_limits.calculate_cpu_sleep_interval(cpulim, percentused, elapsedtime)
+
+  if stoptime > 0.0:
+    # Try to timeout the process
+    if windows_api.timeout_process(pid, stoptime):
+      # Log the stoptime
+      process_stopped_timeline.append((now, stoptime))
+
+      # Drop the first element if the length is greater than the maximum entries
+      if len(process_stopped_timeline) > process_stopped_max_entries:
+        process_stopped_timeline.pop(0)
+
+      # Return how long we slept so parent knows whether it should sleep
+      return stoptime
+
+    else:
+      # Process must have been making a system call, try again next time
+      return -1
+
+  # If the stop time is 0, then avoid calling timeout_process
+  else:
+    return 0.0
+
+
+# Dedicated Thread for monitoring CPU, this is run as a part of repy
+class WinCPUNannyThread(threading.Thread):
+  # Thread variables
+  pid = 0 # Process pid
+
+  def __init__(self):
+    self.pid = os.getpid()
+    threading.Thread.__init__(self,name="CPUNannyThread")
+
+  def run(self):
+    # Elevate our priority, set us to the highest so that we can more effectively throttle
+    success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST)
+
+    # If we failed to get HIGHEST priority, try above normal, else we're still at default
+    if not success:
+      windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
+
+    # Run while the process is running
+    while True:
+      try:
+        # Get the frequency
+        frequency = repy_constants.CPU_POLLING_FREQ_WIN
+
+        # Base amount of sleeping on return value of
+        # win_check_cpu_use to prevent under/over sleeping
+        slept = win_check_cpu_use(nanny_resource_limits.resource_limit("cpu"), self.pid)
+
+        if slept == -1:
+          # Something went wrong, try again
+          pass
+        elif (slept < frequency):
+          time.sleep(frequency-slept)
+
+      except windows_api.DeadProcess:
+        # Process may be dead
+        harshexit.harshexit(97)
+
+      except:
+        tracebackrepy.handle_exception()
+        print >> sys.stderr, "CPU Nanny died! Trying to kill everything else"
+        harshexit.harshexit(25)
+
+
+
+
+
+
+
+############## *nix specific functions (may include Mac) ###############
+
+# This method handles messages on the "diskused" channel from
+# the external process. When the external process measures disk used,
+# it is piped in and cached for calls to get_resources.
+def IPC_handle_diskused(bytes):
+  # Without this global declaration, the assignment below would only bind a
+  # local variable and the module-level cache would never be updated.
+  global cached_disk_used
+  cached_disk_used = bytes
+
+
+# This method handles messages on the "repystopped" channel from
+# the external process. When the external process stops repy, it sends
+# a tuple with (TOS, amount) where TOS is time of stop (getruntime()) and
+# amount is the amount of time execution was suspended.
+def IPC_handle_stoptime(info):
+  # Push this onto the timeline
+  process_stopped_timeline.append(info)
+
+  # Drop the first element if the length is greater than the max
+  if len(process_stopped_timeline) > process_stopped_max_entries:
+    process_stopped_timeline.pop(0)
+
+
+# Use a special class of exception for when
+# resource limits are exceeded
+class ResourceException(Exception):
+  pass
+
+
+# Armon: Method to write a message to the pipe, used for IPC.
+# This allows the pipe to be multiplexed by sending simple dictionaries
+def write_message_to_pipe(writehandle, channel, data):
+  """
+  <Purpose>
+    Writes a message to the pipe.
+
+  <Arguments>
+    writehandle:
+      A handle to a pipe which can be written to.
+
+    channel:
+      The channel used to describe the data. Used for multiplexing.
+
+    data:
+      The data to send.
+
+  <Exceptions>
+    As with os.write().
+    EnvironmentError will be thrown if os.write() sends 0 bytes, indicating the
+    pipe is broken.
+  """
+  # Construct the dictionary
+  mesg_dict = {"ch":channel,"d":data}
+
+  # Convert to a string
+  mesg_dict_str = marshal.dumps(mesg_dict)
+
+  # Make a full string
+  mesg = str(len(mesg_dict_str)) + ":" + mesg_dict_str
+
+  # Send this
+  index = 0
+  while index < len(mesg):
+    bytes = os.write(writehandle, mesg[index:])
+    if bytes == 0:
+      raise EnvironmentError, "Write sent 0 bytes! Pipe broken!"
+    index += bytes
+
+
+# Armon: Method to read a message from the pipe, used for IPC.
+# This allows the pipe to be multiplexed by sending simple dictionaries
+def read_message_from_pipe(readhandle):
+  """
+  <Purpose>
+    Reads a message from a pipe.
+
+  <Arguments>
+    readhandle:
+      A handle to a pipe which can be read from.
+
+  <Exceptions>
+    As with os.read().
+    EnvironmentError will be thrown if os.read() returns a 0-length string, indicating
+    the pipe is broken.
+
+  <Returns>
+    A tuple (Channel, Data) where Channel is used to multiplex the pipe.
+  """
+  # Read until we get to a colon
+  data = ""
+  index = 0
+
+  # Loop until we get a message
+  while True:
+
+    # Read in data if the buffer is empty
+    if index >= len(data):
+      # Read 8 bytes at a time
+      mesg = os.read(readhandle,8)
+      if len(mesg) == 0:
+        raise EnvironmentError, "Read returned empty string! Pipe broken!"
+      data += mesg
+
+    # Increment the index while there is data and we have not found a colon
+    while index < len(data) and data[index] != ":":
+      index += 1
+
+    # Check if we've found a colon
+    if len(data) > index and data[index] == ":":
+      # Get the message length
+      mesg_length = int(data[:index])
+
+      # Determine how much more data we need
+      more_data = mesg_length - len(data) + index + 1
+
+      # Read in the rest of the message
+      while more_data > 0:
+        mesg = os.read(readhandle, more_data)
+        if len(mesg) == 0:
+          raise EnvironmentError, "Read returned empty string! Pipe broken!"
+        data += mesg
+        more_data -= len(mesg)
+
+      # Done, convert the message to a dict
+      whole_mesg = data[index+1:]
+      mesg_dict = marshal.loads(whole_mesg)
+
+      # Return a tuple (Channel, Data)
+      return (mesg_dict["ch"],mesg_dict["d"])
+
+
+
+# This dictionary defines the functions that handle messages
+# on each channel. E.g. when a message arrives on the "repystopped" channel,
+# the IPC_handle_stoptime function should be invoked to handle it.
+IPC_HANDLER_FUNCTIONS = {"repystopped":IPC_handle_stoptime,
+                         "diskused":IPC_handle_diskused }
+
+
+# This thread checks that the parent process is alive and invokes
+# delegate methods when messages arrive on the pipe.
+class parent_process_checker(threading.Thread):
+  def __init__(self, readhandle):
+    """
+    <Purpose>
+      Terminates harshly if our parent dies before we do.
+
+    <Arguments>
+      readhandle: A file descriptor to the handle of a pipe to our parent.
+    
+    """
+    # Name our self
+    threading.Thread.__init__(self, name="ParentProcessChecker")
+
+    # Store the handle
+    self.readhandle = readhandle
+
+  def run(self):
+    # Run forever
+    while True:
+      # Read a message
+      try:
+        mesg = read_message_from_pipe(self.readhandle)
+      except Exception, e:
+        break
+
+      # Check for a handler function
+      if mesg[0] in IPC_HANDLER_FUNCTIONS:
+        # Invoke the handler function with the data
+        handler = IPC_HANDLER_FUNCTIONS[mesg[0]]
+        handler(mesg[1])
+
+      # Print a message if there is a message on an unknown channel
+      else:
+        print "[WARN] Message on unknown channel from parent process:", mesg[0]
+
+
+    ### We only leave the loop on a fatal error, so we need to exit now
+
+    # Write out status information; our parent would normally do this, but it's dead.
+    statusstorage.write_status("Terminated")
+    print >> sys.stderr, "Monitor process died! Terminating!"
+    harshexit.harshexit(70)
+
+
+
+# For *NIX systems, there is an external process, and the
+# pid for the actual repy process is stored here
+repy_process_id = None
+
+# Forks Repy. The child will continue execution, and the parent
+# will become a resource monitor
+def do_forked_resource_monitor():
+  global repy_process_id
+
+  # Get a pipe
+  (readhandle, writehandle) = os.pipe()
+
+  # I'll fork a copy of myself
+  childpid = os.fork()
+
+  if childpid == 0:
+    # We are the child, close the write end of the pipe
+    os.close(writehandle)
+
+    # Start a thread to check on the survival of the parent
+    parent_process_checker(readhandle).start()
+
+    return
+  else:
+    # We are the parent, close the read end
+    os.close(readhandle)
+
+  # Store the childpid
+  repy_process_id = childpid
+
+  # Start the nmstatusinterface
+  nmstatusinterface.launch(repy_process_id)
+
+  # Small internal error handler function
+  def _internal_error(message):
+    try:
+      print >> sys.stderr, message
+      sys.stderr.flush()
+    except:
+      pass
+
+    # Stop the nmstatusinterface, we don't want any more status updates
+    nmstatusinterface.stop()
+
+    # Kill repy
+    harshexit.portablekill(childpid)
+
+    try:
+      # Write out status information, repy was stopped
+      statusstorage.write_status("Terminated")
+    except:
+      pass
+
+  try:
+    # Some OS's require that you wait on the pid at least once
+    # before they do any accounting
+    (pid, status) = os.waitpid(childpid,os.WNOHANG)
+
+    # Launch the resource monitor, if it fails determine why and restart if necessary
+    resource_monitor(childpid, writehandle)
+
+  except ResourceException, exp:
+    # Repy exceeded its resource limit, kill it
+    _internal_error(str(exp)+" Impolitely killing child!")
+    harshexit.harshexit(98)
+
+  except Exception, exp:
+    # There is some general error...
+    try:
+      (pid, status) = os.waitpid(childpid,os.WNOHANG)
+    except:
+      # This means that the process is dead; treat it as a normal exit.
+      # (Without this assignment, status could be unbound in the check below.)
+      status = 0
+
+    # Check if this is repy exiting
+    if os.WIFEXITED(status) or os.WIFSIGNALED(status):
+      sys.exit(0)
+
+    else:
+      _internal_error(str(exp)+" Monitor death! Impolitely killing child!")
+      raise
+
+def resource_monitor(childpid, pipe_handle):
+  """
+  <Purpose>
+    Function runs in a loop forever, checking resource usage and throttling CPU.
+    Checks CPU, memory, and disk.
+
+  <Arguments>
+    childpid:
+      The child pid, e.g. the pid of repy
+
+    pipe_handle:
+      A handle to the pipe to the repy process. Allows sending resource use information.
+  """
+  # Get our pid
+  ourpid = os.getpid()
+
+  # Calculate how often disk should be checked
+  disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)
+  current_interval = -1 # What cycle are we on
+
+  # Store time of the last interval
+  last_time = getruntime()
+  last_CPU_time = 0
+  resume_time = 0
+
+  # Run forever...
+  while True:
+    ########### Check CPU ###########
+    # Get elapsed time
+    currenttime = getruntime()
+    elapsedtime1 = currenttime - last_time     # Calculate against last run
+    elapsedtime2 = currenttime - resume_time   # Calculate since we last resumed repy
+    elapsedtime = min(elapsedtime1, elapsedtime2)  # Take the minimum interval
+    last_time = currenttime  # Save the current time
+
+    # Safety check, prevent ZeroDivisionError
+    if elapsedtime == 0.0:
+      continue
+
+    # Get the total cpu at this point
+    totalCPU = os_api.get_process_cpu_time(ourpid)    # Our own usage
+    totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage
+
+    # Calculate percentage of CPU used
+    percentused = (totalCPU - last_CPU_time) / elapsedtime
+
+    # Do not throttle for the first interval, wrap around
+    # Store the totalCPU for the next cycle
+    if last_CPU_time == 0:
+      last_CPU_time = totalCPU
+      continue
+    else:
+      last_CPU_time = totalCPU
+
+    # Calculate stop time
+    stoptime = nanny_resource_limits.calculate_cpu_sleep_interval(nanny_resource_limits.resource_limit("cpu"), percentused, elapsedtime)
+
+    # If we are supposed to stop repy, then suspend, sleep and resume
+    if stoptime > 0.0:
+      # They must be punished by stopping
+      os.kill(childpid, signal.SIGSTOP)
+
+      # Sleep until time to resume
+      time.sleep(stoptime)
+
+      # And now they can start back up!
+      os.kill(childpid, signal.SIGCONT)
+
+      # Save the resume time
+      resume_time = getruntime()
+
+      # Send this information as a tuple containing the time repy was stopped and
+      # for how long it was stopped
+      write_message_to_pipe(pipe_handle, "repystopped", (currenttime, stoptime))
+
+
+    ########### End Check CPU ###########
+    #
+    ########### Check Memory ###########
+
+    # Get how much memory repy is using
+    memused = os_api.get_process_rss()
+
+    # Check if it is using too much memory
+    if memused > nanny_resource_limits.resource_limit("memory"):
+      raise ResourceException, "Memory use '"+str(memused)+"' over limit '"+str(nanny_resource_limits.resource_limit("memory"))+"'."
+
+    ########### End Check Memory ###########
+    #
+    ########### Check Disk Usage ###########
+    # Increment our current cycle
+    current_interval += 1
+
+    # Check if it is time to check the disk usage
+    if (current_interval % disk_interval) == 0:
+      # Reset the interval
+      current_interval = 0
+
+      # Calculate disk used
+      diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
+
+      # Raise exception if we are over limit
+      if diskused > nanny_resource_limits.resource_limit("diskused"):
+        raise ResourceException, "Disk use '"+str(diskused)+"' over limit '"+str(nanny_resource_limits.resource_limit("diskused"))+"'."
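+      # (Reaching this point means the disk limit was not exceeded; the
+      # measurement is still reported below so the repy process can cache
+      # it for getresources().)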
+
+      # Send the disk usage information, raw bytes used
+      write_message_to_pipe(pipe_handle, "diskused", diskused)
+
+    ########### End Check Disk ###########
+
+    # Sleep before the next iteration
+    time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)
+
+
+###########     functions that help me figure out the os type    ###########
+
+# Calculates the system granularity
+def calculate_granularity():
+  global granularity
+
+  if ostype in ["Windows", "WindowsCE"]:
+    # The Granularity of getTickCount is 1 millisecond
+    granularity = pow(10,-3)
+
+  elif ostype == "Linux":
+    # We don't know if the granularity is correct yet
+    correct_granularity = False
+
+    # How many times have we tested
+    tests = 0
+
+    # Loop while the granularity is incorrect, up to 10 times
+    while not correct_granularity and tests <= 10:
+      current_granularity = os_api.get_uptime_granularity()
+      uptime_pre = os_api.get_system_uptime()
+      time.sleep(current_granularity / 10)
+      uptime_post = os_api.get_system_uptime()
+
+      diff = uptime_post - uptime_pre
+
+      correct_granularity = int(diff / current_granularity) == (diff / current_granularity)
+      tests += 1
+
+    granularity = current_granularity
+
+  elif ostype == "Darwin":
+    granularity = os_api.get_uptime_granularity()
+
+
+
+# Call init_ostype!!!
+harshexit.init_ostype()
+
+ostype = harshexit.ostype
+osrealtype = harshexit.osrealtype
+
+# Import the proper system wide API
+if osrealtype == "Linux":
+  import linux_api as os_api
+elif osrealtype == "Darwin":
+  import darwin_api as os_api
+elif osrealtype == "FreeBSD":
+  import freebsd_api as os_api
+elif ostype == "Windows" or ostype == "WindowsCE":
+  # There is no real reason to do this, since windows is imported separately
+  import windows_api as os_api
+else:
+  # This is a non-supported OS
+  raise UnsupportedSystemException, "The current Operating System is not supported! Fatal Error."
+
+# Set granularity
+calculate_granularity()
+
+# For Windows, we need to initialize time.clock()
+if ostype in ["Windows", "WindowsCE"]:
+  time.clock()
+
+# Initialize getruntime for other platforms
+else:
+  # Set the starttime to the initial uptime
+  starttime = getruntime()
+  last_uptime = starttime
+
+  # Reset elapsed time
+  elapsedtime = 0
+
+
+# Conrad: initialize nanny (Prevents circular imports)
+# Note: nanny_resource_limits can be initialized at any time after getruntime()
+# is defined, this just seems the most appropriate place to put the call.
+nanny_resource_limits.init(getruntime)
diff --git a/nonportable.pyc b/nonportable.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f93e0ca4ef34b18f154aebee5aa93fe09a71006b
GIT binary patch
[base85-encoded binary payload omitted: compiled bytecode for nonportable.py; generated .pyc files should not be committed]
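For reference, write_message_to_pipe() and read_message_from_pipe() above frame every IPC message as "<length>:<marshal-serialized {'ch': channel, 'd': data}>". A minimal round-trip sketch of that wire format (Python 2; the frame() helper is hypothetical and not part of this patch):

    import os
    import marshal

    def frame(channel, data):
        # Same framing as write_message_to_pipe: length prefix, colon, payload.
        payload = marshal.dumps({"ch": channel, "d": data})
        return str(len(payload)) + ":" + payload

    readhandle, writehandle = os.pipe()
    os.write(writehandle, frame("diskused", 4096))

    # Parse the way read_message_from_pipe does: scan to the first colon,
    # then take exactly <length> payload bytes.
    raw = os.read(readhandle, 4096)
    length, rest = raw.split(":", 1)
    mesg_dict = marshal.loads(rest[:int(length)])
    assert (mesg_dict["ch"], mesg_dict["d"]) == ("diskused", 4096)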
diff --git a/repy_v1 b/repy_v1
new file mode 160000
index 0000000..e938a69
--- /dev/null
+++ b/repy_v1
@@ -0,0 +1 @@
+Subproject commit e938a694f37771788df1769f95bf02c70fca0e32
diff --git a/repyhelper.py b/repyhelper.py
new file mode 100644
index 0000000..a77b344
--- /dev/null
+++ b/repyhelper.py
@@ -0,0 +1,495 @@
+"""
+<Program Name>
+  repyhelper.py
+
+<Started>
+  November 2008
+
+<Author>
+  Andreas Sekine
+  Heavily revised by: Justin Cappos
+
+<Purpose>
+  Make porting Repy code to regular python easier. The main interface
+  is through repyhelper.translate and repyhelper.translate_and_import
+
+  JAC Note: I wanted to add an interface that allowed a user to specify
+  a repyhelper path that is separate from the Python path. This seems to
+  be impossible because you can't always use absolute names when importing
+  Python modules and because you can't prevent other imports from happening.
+  This prevents me from writing modules in a location other than the python
+  path. As jsamuel pointed out, it's not clear how this interacts with
+  non-relative path names (#291). My solution is to write these files into
+  the first item in the Python path.
+
+"""
+
+
+import os      # for file checks
+import inspect # for fiddling with callstack/module namespaces
+
+# JAC / JS: to get the Python path
+import sys
+
+
+TRANSLATION_TAGLINE = "### Automatically generated by repyhelper.py ###"
+
+
+WARNING_LABEL = """
+### THIS FILE WILL BE OVERWRITTEN!
+### DO NOT MAKE CHANGES HERE, INSTEAD EDIT THE ORIGINAL SOURCE FILE
+###
+### If changes to the src aren't propagating here, try manually deleting this file.
+### Deleting this file forces regeneration of a repy translation
+
+"""
+
+
+
+class TranslationError(Exception):
+  """ An error occurred during translation """
+
+
+
+#For keeping a truly shared context between translated files
+shared_context = {}
+def get_shared_context():
+  """ Ensure all imported repy code has a common 'mycontext' dict """
+  global shared_context
+  return shared_context
+
+
+# this specifies where the preprocessed files should end up. By default, they
+# will be written to the same directory as they are in. If there is
+# a relative path name, it will be written in sys.path[0]
+_importcachedir = None
+
+def set_importcachedir(newimportcachedir):
+  """
+  <Purpose>
+    Repyhelper creates Python versions of repy files. This function sets
+    the location where all those files will be stored. By default, files are
+    stored wherever they are found in the python path. If a relative path
+    name is specified, by default, files are instead stored in the first
+    directory in the Python path sys.path[0] (usually the current directory)
+
+  <Arguments>
+    newimportcachedir:
+      The location where all files should be stored. Use None to restore
+      the default behavior
+
+  <Exceptions>
+    TypeError if the path is invalid.
+    ValueError is thrown if the newimportcachedir isn't in the path
+
+  <Side Effects>
+    None.
+
+  <Returns>
+    None.
+  """
+  global _importcachedir
+
+  # handle None...
+  if newimportcachedir == None:
+    _importcachedir = None
+    return
+
+  # else, is this a valid path?
+  if type(newimportcachedir) != str:
+    raise TypeError("Type of newimportcachedir '"+str(newimportcachedir)+"' is not a string")
+
+  # If it's an empty string, assume it's '.'
+  if newimportcachedir == '':
+    newimportcachedir = '.'
+
+  if not os.path.isdir(newimportcachedir):
+    raise TypeError("Path given for newimportcachedir '"+str(newimportcachedir)+"' is not a directory")
+
+
+  if newimportcachedir not in sys.path:
+    raise ValueError, "The import cache dir '"+newimportcachedir+"' isn't in the Python path"
+
+  # set the path... We're done.
+  _importcachedir = newimportcachedir
+
+
+
+
+
+
+def set_shared_context(context):
+  """
+  <Purpose>
+    Set the shared mycontext dictionary
+
+  <Arguments>
+    context:
+      A dict to use as the new mycontext
+
+  <Exceptions>
+    TypeError if context is None
+
+  <Side Effects>
+    Subsequent translations share the given dict as their mycontext.
+
+  <Returns>
+    None.
+  """
+  global shared_context
+  if context is None:
+    raise TypeError("Context can't be none")
+  shared_context = context
+
+
+#Ensure the generated module has a safe name
+# Can't use . in the name because import uses it for scope, so convert to _
+def _get_module_name(repyfilename):
+  head,tail = os.path.split(repyfilename)
+  tail = tail.replace('.', '_')
+  return os.path.join(head, tail)
+
+
+def _translation_is_needed(repyfilename, generatedfile):
+  """ Checks if generatedfile needs to be regenerated. Does several checks to
+  decide if generating generatedfilename based on repyfilename is a good idea.
+    --does the file already exist?
+    --was it automatically generated?
+    --was it generated from the same source file?
+    --was the original modified since the last translation?
+ + """ + + if not os.path.isfile(repyfilename): + raise TranslationError("no such file:", repyfilename) + + if not os.path.isfile(generatedfile): + return True + + #Read the first line + try: + fh = open(generatedfile, "r") + first_line = fh.readline().rstrip() + current_line = '' + for line in fh: + current_line = line + last_line = current_line + fh.close() + except IOError, e: + raise TranslationError("Error opening old generated file: " + generatedfile + ": " + str(e)) + + #Check to see if the file was generated by repyhelper, to prevent + #clobbering a file that we didn't create + if not first_line.startswith(TRANSLATION_TAGLINE): + raise TranslationError("File name exists but wasn't automatically generated: " + generatedfile) + + if not last_line.startswith(TRANSLATION_TAGLINE): + # The file generation wasn't completed... I think this means we should + # silently regenerate (#617) + return True + + #Check to see if the generated file has the same original source + old_translation_path = first_line[len(TRANSLATION_TAGLINE):].strip() + generated_abs_path = os.path.abspath(repyfilename) + if old_translation_path != generated_abs_path: + #It doesn't match, but the other file was also a translation! Regen then... + return True + + #If we get here and modification time of orig is older than gen, this is still + #a valid generation + repystat = os.stat(repyfilename) + genstat = os.stat(generatedfile) + if repystat.st_mtime < genstat.st_mtime: + return False + + return True + + +def _generate_python_file_from_repy(repyfilename, generatedfilename, shared_mycontext, callfunc, callargs): + """ Generate a python module from a repy file so it can be imported + The first line is TRANSLATION_TAGLINE, so it's easy to detect that + the file was automatically generated + + """ + + #Start the generation! Print out the header and portability stuff, then include + #the original data and translations + try: + # Create path if it doesn't exist. + # JAC: due to #814, we check for the empty directory too... + if os.path.dirname(generatedfilename) != '' and not os.path.isdir(os.path.dirname(generatedfilename)): + os.makedirs(os.path.dirname(generatedfilename)) + fh = open(generatedfilename, "w") + except IOError, e: + # this is likely a directory permissions error + raise TranslationError("Cannot open file for translation '" + repyfilename + "': " + str(e)) + + # always close the file + try: + print >> fh, TRANSLATION_TAGLINE, os.path.abspath(repyfilename) + print >> fh, WARNING_LABEL + print >> fh, "from repyportability import *" + print >> fh, "from repyportability import _context" + print >> fh, "import repyhelper" + if shared_mycontext: + print >> fh, "mycontext = repyhelper.get_shared_context()" + else: + print >> fh, "mycontext = {}" + print >> fh, "callfunc =", repr(callfunc) + #Properly format the callargs list. 
+    #Assume it only contains python strings
+    print >> fh, "callargs =", repr(callargs)
+    print >> fh
+    _process_output_file(fh, repyfilename, generatedfilename)
+    # append the TRANSLATION_TAGLINE so that we can see if the operation was
+    # interrupted (#617)
+    print >> fh
+    print >> fh, TRANSLATION_TAGLINE, os.path.abspath(repyfilename)
+  except IOError, e:
+    raise TranslationError("Error translating file " + repyfilename + ": " + str(e))
+  finally:
+    fh.close()
+
+def _process_output_file(outfh, filename, generatedfilename):
+  """ Read filename and print it to outfh, except convert includes into calls to
+  repyhelper.translate
+  """
+  try:
+    repyfh = open(filename, "r")
+    repyfiledata = repyfh.readlines()
+    repyfh.close()
+  except IOError, e:
+    #Delete the partially translated file, to ensure this partial translation
+    #doesn't get used
+    try:
+      os.remove(generatedfilename)
+    except (IOError, OSError):
+      pass
+    raise TranslationError("Error opening " + filename + ": " + str(e))
+
+  #Having read all the data, lets output it again, performing translations
+  #as needed
+  for line in repyfiledata:
+    #look for includes, and substitute them with calls to translate
+    if line.startswith('include '):
+      includename = line[len('include '):].strip()
+      modulename = _get_module_name(includename)
+      print >> outfh, "repyhelper.translate_and_import('" + includename + "')"
+    else:
+      print >> outfh, line,   #line includes a newline, so don't add another
+
+
+def translate(filename, shared_mycontext=True, callfunc="import", callargs=None, force_overwrite=False):
+  """
+  <Purpose>
+    Translate a Repy file into a valid python module that can be imported by
+    the standard "import" statement.
+
+    Creates a python file corresponding to the repy file in the current
+    directory, with all '.' in the name replaced with "_", and ".py" appended
+    to it to make it a valid python module name.
+    Performs several checks to only perform a translation when necessary, and
+    to prevent accidentally clobbering other files.
+    The repyhelper and repyportability modules must be in the Python path for
+    the translated files to import.
+    Note that the optional arguments used to set variables are only used
+    if the file is retranslated--otherwise they are ignored. To ensure they're
+    used, manually delete the translation to force regeneration
+
+  <Arguments>
+    filename:
+      A valid repy file name that exists in the Python path (sys.path). If the
+      filename contains a directory separator, it is used instead of the path.
+    shared_mycontext:
+      Optional parameter whether or not the mycontext of this translation
+      should be shared, or the translation should have its own. Default True
+    callfunc:
+      Optional parameter for what the callfunc of this translation should be.
+      Should be a valid python string. Default "import"
+    callargs:
+      A list of strings to use as the repy's "callargs" variable. Default empty
+      list.
+    force_overwrite:
+      If set to True, will skip all file checks and just overwrite any file
+      with the same name as the generated file. Dangerous, so use cautiously.
+      Default False
+
+  <Exceptions>
+    TranslationError if there was an error during file generation
+    ValueError if the file can't be found or directory is invalid
+
+  <Side Effects>
+    Creates a python file corresponding to the repy file, overwriting previously
+    generated files that exist with that name
+
+  <Returns>
+    The name of the Python module that was created in the current directory. This
+    string can be used with __import__ to import the translated module.
+  """
+
+  global _importcachedir
+
+  filedir = None          # The directory the file is in.
+  filenamewithpath = None # The full path to the file including the filename.
+  destdir = None          # where the file should be written when generated
+
+  # If the file name contains a directory, honor that exactly...
+  if filename != os.path.basename(filename):
+    # since the name contains a directory, that's the filename + path
+    filenamewithpath = filename
+
+    # I need to use the absolute path because python doesn't handle '..' in
+    # directory / module names
+    filedir = os.path.abspath(os.path.dirname(filename))
+
+    # write it to the first directory in the python path (by default)
+    destdir = sys.path[0]
+
+    # Let's verify these exist and if not exit...
+    if not os.path.isdir(filedir):
+      raise ValueError("In repyhelper, the directory '" + filedir + "' does not exist for file '"+filename+"'")
+    if not os.path.isfile(filename):
+      raise ValueError("In repyhelper, the file '" + filename + "' does not exist.")
+
+  else:
+    # Determine in which directory the file is located (using the
+    # Python path)
+    for pathdir in sys.path:
+      possiblefilenamewithpath = os.path.join(pathdir, filename)
+      if os.path.isfile(possiblefilenamewithpath):
+        filenamewithpath = possiblefilenamewithpath
+        filedir = pathdir
+        break
+
+    # make sure we found something.
+    if filenamewithpath is None:
+      raise ValueError("File " + filename + " does not exist in the Python path.")
+    # write it where it was (by default)
+    destdir = filedir
+
+
+  if callargs is None:
+    callargs = []
+
+  # expand the name from foo.repy to foo_repy (change '.' to '_')
+  modulenameonly = _get_module_name(os.path.basename(filename))
+  generatedfilenameonly = modulenameonly + ".py"
+
+  # if it shouldn't be in the default location, put it in the correct dir
+  if _importcachedir != None:
+    destdir = _importcachedir
+
+  # let's generate it
+  generatedfilenamewithpath = os.path.join(destdir, generatedfilenameonly)
+
+  if force_overwrite or _translation_is_needed(filenamewithpath, generatedfilenamewithpath):
+    _generate_python_file_from_repy(filenamewithpath, generatedfilenamewithpath, shared_mycontext, callfunc, callargs)
+
+  # return the name so that we can import it
+  return modulenameonly
+
+
+
+def translate_and_import(filename, shared_mycontext=True, callfunc="import", callargs=None,
+                         force_overwrite=False, preserve_globals=False):
+  """
+  <Purpose>
+    Translate a repy file to python (see repyhelper.translate), but also import
+    it to the current global namespace. This import behaves similarly to python's
+    "from <module> import *", to mimic repy's include semantics, in which
+    included files are in-lined. Globals starting with "_" aren't imported.
+
+  <Arguments>
+    filename:
+      The name of the repy filename to translate and import
+    shared_mycontext:
+      Whether or not the mycontext of this translation should be shared, or
+      the translation should have its own. Default True
+    callfunc:
+      Optional parameter for what the callfunc of this translation should be.
+      Should be a valid python string. Default "import"
+    callargs:
+      A list of strings to use as the repy's "callargs" variable. Default empty list.
+    force_overwrite:
+      If set to True, will skip all file checks and just overwrite any file with
+      the same name as the generated file. Dangerous, so use cautiously.
+      Default False
+    preserve_globals:
+      Whether or not to preserve globals in the current namespace.
+      False means globals in current context will get overwritten by globals
+      in filename if the names clash, True means to keep current globals in the
+      event of a collision.
+      Default False
+
+  <Exceptions>
+    TranslationError if there was an error during translation
+
+  <Side Effects>
+    Creates/updates a python module corresponding to the repy file argument,
+    and places references to that module in the current global namespace
+
+  <Returns>
+    None
+
+  """
+
+  modulename = translate(filename, shared_mycontext, callfunc, callargs, force_overwrite)
+  _import_file_contents_to_caller_namespace(modulename, preserve_globals)
+
+
+#List of globals to skip; we want to make sure to ignore these when
+#inserting the imported module's vars into the caller's namespace
+# Could also blacklist the repyportability things here....
+GLOBAL_VARS_BLACKLIST = set(['mycontext', 'callfunc', 'callargs', 'repyhelper'])
+
+def _import_file_contents_to_caller_namespace(modulename, preserve_globals):
+  """
+  Responsible for importing modulename, and taking the contents and
+  injecting them into the caller's namespace. If preserve_globals is set to
+  True, then globals that are already defined in the caller's namespace get
+  skipped.
+
+  Doesn't include objects that start with "_"
+
+  BIG HACK WARNING:
+  The idea here is to use inspect to get a handle to the caller's module, and
+  start inserting elements from the imported module into the caller's global
+  namespace. This is to simulate the repy behavior of inlining includes, which
+  puts everything in the same namespace.
+
+  """
+  #DEBUG
+  #caller_file = os.path.basename(inspect.currentframe().f_back.f_back.f_code.co_filename)
+  #print "*** IMPORTING", modulename, "INTO FILE", caller_file, "***"
+
+
+  #Let python handle the initial import
+  import_module = __import__(modulename)
+
+  #To get a handle on the caller's module navigate back up the stack:
+  #Go back 2 frames: back to translate_and_import, and another to
+  #whoever called that
+  caller_globals = inspect.currentframe().f_back.f_back.f_globals
+
+
+  #Now iterate over the import's members, and insert them into the
+  #caller's namespace
+  for name,definition in inspect.getmembers(import_module):
+
+    #like normal python from imports, don't import names starting with "_"
+    if name.startswith('_'):
+      continue
+
+    #Skip blacklisted items
+    if name in GLOBAL_VARS_BLACKLIST:
+      continue
+
+    #skip already defined vars if told to do so
+    if name in caller_globals and preserve_globals:
+      continue
+
+    caller_globals[name] = definition
+
+
diff --git a/repyhelper.pyc b/repyhelper.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e92a1f5b0426b9457265f7fdf7fac89068ecb08
GIT binary patch
[base85-encoded binary payload omitted: compiled bytecode for repyhelper.py; generated .pyc files should not be committed]
zbQ%_oSa20dVVp%Bs6Q^gO~I@-V{?OG{*!l}>E_5-G$MA1t;1E7wXZ=Ke(tPf+oJ)T zm?1}jTZs0o8|(y~UWC%Oo8w_5D5vADV6b>9@n_G2ejL)fYW;E09~H$czuAM!vK5YW z2(v&0FJ=#e1sQC+6@yN#cDoo|*E+Rl3;gOw`6w&YDn4wO*RimvBp2z?rk2ckd|%?k z(vLY+t5q*}Z&c^J*$V%i_byIQmz@*T z{Wflhx)rrMqq5gIZvftDEDaF&Fk+BV`WH+q*UJ=h*WmoQWf0<7bm@A;^I)|H8JRGv z9WqDfg9;;Dv91byL^v$hIv8xUH<9SXe5oBdp>|bDD8iPHF8_NDkAotzAU?u>f;E_7 z3Iue#NIWdREo($SSwU<`c;j?rK<^-5PjNhpvl1CZFJj|JDLR8tfk5dxalqub#sx%S zXzaCH$(E4Vs(bM7)`yhfd<6Cw7RA)n;mp^#U=}ego3tN z_z$b9xy03C>4b|lATohnZOE$dXYk#huu6P%`4l#kcu}d;s5sdxNaG7tBF=3QQyef`xP09#znA_x5t+CJH zz}S@o$|0^ZJpvkr>%L@B_G{|JMULeF5ID`9Pexbbm)5@bRGCvC-dJ6#Lmyqn0JkC=;epyvJXH-7N*XAEZdB~g;0s4U!mL>j8 zR7a)!tgH0q&MTtqOfeMmjzaFws+Zt|&%x z;dK{FAn-twTdddQf-)@(x4zelI=hHGyAwbWTuP!Slu!+pR|Zq$c=uCqN<#)0^byiY zZ5TEzH$)BsK7S&LaZnkG7OPKh1;hr#4AX0p=o#qkGzHd#QDBM4_3I%U6Mcd{;4>U+ zLAW0X0$#sPh>Af3jt_nhl`1uvBtQ`IgJPv4gpAV4bPYmOn#J33f_`EZ$6e4&ZQ=xF z`K=!yJ})o~PRG`TVuse4ASW~G^hD}yG2P53QV2n@6fCtF4V{ANP3Zj%~w%#3MbfhY&en<35w-!W+RIQ)I`El4sjppw4B9} ze_&Wi>+JNw`vqav9*Oe)8N3xfVwkKm%!1@>a?@YgXsxeou07glZ>~I8U)xv}`U2#! zSS2#K{E#vdnnMJKkX#fn0E|2aM6uQPgT9HH0`ufvun@+0F0;X|2xaQ_L=U0#ZBd>z zoTsuc*o1r`;o`j6*JO-Qei2tpL3?T{+oG5aDP41IsB2%-_i*Kk&g=+O;{C~ED7okX zwPMqzSw`WX;*0qXE~;_Cy8yOTt5)HFE`Wob^%}Uof~UpWy!UElzFMnZ#+SKD4bLo7 zt8_TJRGrg&55MNmF&5))fjDqZf+2uiV$H|Jd!SV5%%lJ?Zq9wK9rMkzaqt9^PbrJ3 z2y=m{e+*2m15-~kxh=774VVgu17=SE6PWtb|4`$%TY@LGlV?4E*3@*%H;-Vioa{zX_z}Yc4 z$?*mPu@*lKxEIoxmYrD^7KK2oO+c9qhiM^%;bTmA86atMp~!}h$>YK(M_?yJ5kiQE zg3<)!FTf-e!;gTV;NhM?;k_VXC`Sac5*V>fnIht@DmuCZEPMn1O1Efz%$x^XF^CTW zcS$tsx4{*@yhM?Mk}%TMqA;5GHp`Z;} zDEmvidm9(Q%O@-T#@fb%_WH`*)pbFAbT|{xYa{E5RH)yJZE+49LSYBKKrW-p z1CT5Hk>t)X;j;gtSkd+JfgSg6qPYL76pUYcIVoQLCIo_JT99sA6OfLyxl-Dk+`mT= zbBis{R;%CjzV5y1%~uwyRaM1(!~GB3tTepKmCIhuJ6ElF8c6w8rNgC1pHbrEKjUG1 zjZjBWgV}?)1G~Waa1VwFZistuccG7;0I-lj1l0X5pzf6N#^*`*b^sbE;*nu4ND=tk zE&^f{jFjhj2{4bSC^#&+Mxx@WL#X)Tw-p%c{bwHwYXzx9&QRbf`v*cOyrO-46Ks>Z zohHGwe05)Pi#6aRo)@r-xBy7|^bZI`HB_yvd3bn&t;AN7?#@KGAhn1*8W)Lh5pLlO zk$vGF1pxtzsM}ixJ!8fRQEdW($h1(WKMKL&xU&KD>G#Pgfddn>yHVHz<5d3}Y>tu5 zL5@`LA!_4fYQxokoSdo6wdRu4T)MW=l$eQCB<)f#9piJwTN`2t%Y32%B5&b4k6M=R z;FGhjw|K{u>|f)BsMO>d5y~Q?AgjL^DF282CVUEu5oBdL8ViFw5X?r} zUqA{rma)cpt;3{XvDdh8Z3##e zBQQTF4$?djgn>IOhyw+AKvkQPcNGvZ2}3Z;9xF%-vfORKPnW$US1%sA#_m6RPr))= zT`&W&5}NpI@e%@OsbKa?-Y{C6p|X!Z{iVXs6W9*=&&1YS(x42%0;tABGpLLX0VQ0) z6>nQ2Q&J^Fg$IX=LjgTt$=C%zM2E%0Y8b>99_}HC)odMH*oUN^ z4si`6=pop01@Pr046;x!+6A?UOfL2+m^jDI9!tsPD`(6AGr~oVf%uh%OolO{-* zNC~554+3E{=z!fFvH3x`1ca3szAO%K!~oP0yFoG{l0KA1+`a?5LUNi#lNPUEmu~Li zxCfiDn@RC@V)vppQ;ook`k97Dw|Hx9y6}ZV!Mgw1=eg{R>nY2+%7YmIE2Dv2^gT;xpF5v(u@Zd zI7%{n#nzY0@G_>g3-T~^E3|%8OcT$EAO^=C9o8U554+pgMgdLdWR{GNdo;2kU(5&x z&gna~fh9S5q_Snh@4z6tLfEgO9pRuwx|bloWVa7BvlAk%AeCRDCguvNy46;cKUTj| zvBEZ6SK*FRi2XZ#tSKJcr?l20rQmn4AqnWR`^s1v6H9?@#EDEaRH9b@zfJSi zw`nkLF}Q*3dMd4I>#sdIfV8wGTIO6kcygA^{HF#n=obrUGp~GBeHfFX6NQ8!W(3R|0RJp|H<* z)2Ft=`+f3$pNCBR`?%N;V~vk1yxir5^jLO-51Fz=7wjNLOAaFOo{iHSCI2X)e`InN z;5}dRp8KuEsk|eV7&=Wt2Kv&Rn`xyb7Zv0SJ96Yp(f)~t8xnUFz9M>qamO7fpl;QX zu&h_FRO+>FRL^1y`AsA#zjo@JcS#bOb#De+$_te`Hg*$-ia3n;JP$h(?k~6 zU4p3W*EO6(L4F>o>teGLvSBcS*0hd-n{I=xff8}bkqGbLNE^3Oi)5A@P@iUH^lmjd; zp?x`;LQ!puAi}?hx;*)+Jf-9T+KMDWkrr%#QYb)5^M8Y;1aj!A45*|p1>Y#f{ls|6 z{}w~xQk$BU|0#lkXbR`3pGR$LlO?Y^ys9Mbl9Jk$EW)G0s3+F!)D{+`2N8~)PS+{Q zy8{cBz#++8(zG1;aGYp!%{XB&+QO6^Te9$h$S*KRzL5;NwqQiNQ8YY+He^3^(N3N_ zZp|ZhowN@Qi{(CHqy>RP;r}Lio3bVdqr5MqT{lNVB*hpC7D#LHZR(s1F^Cue5GjG9 zj4YR>Ym%H~K+k}&i}Qxqv=ve=5tx(cC_*t6cBm(KOg*XUfnzf~+hyNxi1YBD(%;bO 
diff --git a/repyportability.py b/repyportability.py
new file mode 100644
index 0000000..ef48be7
--- /dev/null
+++ b/repyportability.py
@@ -0,0 +1,313 @@
+
+
+import __builtin__
+
+# I'm importing these so I can neuter the calls so that they aren't
+# restricted...
+
+import os
+import sys
+import safe
+import nanny
+import emulfile
+import emulmisc
+import namespace
+import nonportable
+import virtual_namespace
+
+# WTF!?! repyportability uses repyhelper to import dylink!?!
+import repyhelper
+
+# JAC: Save the calls in case I want to restore them. This is useful if
+# repy ends up wanting to use either repyportability or repyhelper...
+# This is also useful if a user wants to enforce restrictions on the repy
+# code they import via repyhelper (they must use
+# restrictions.init_restriction_tables(filename) as well)...
+oldrestrictioncalls = {}
+oldrestrictioncalls['nanny.tattle_quantity'] = nanny.tattle_quantity
+oldrestrictioncalls['nanny.tattle_add_item'] = nanny.tattle_add_item
+oldrestrictioncalls['nanny.tattle_remove_item'] = nanny.tattle_remove_item
+oldrestrictioncalls['nanny.is_item_allowed'] = nanny.is_item_allowed
+oldrestrictioncalls['nanny.get_resource_limit'] = nanny.get_resource_limit
+oldrestrictioncalls['nanny._resources_allowed_dict'] = nanny._resources_allowed_dict
+oldrestrictioncalls['nanny._resources_consumed_dict'] = nanny._resources_consumed_dict
+oldrestrictioncalls['emulfile.assert_is_allowed_filename'] = emulfile._assert_is_allowed_filename
+
+
+port_list = range(60000, 65000)
+
+default_restrictions = {'loopsend': 100000000.0, 'netrecv': 1000000.0, 'random': 10000.0, 'insockets': 500.0, 'fileread': 10000000.0, 'netsend': 1000000.0, 'connport': set(port_list), 'messport': set(port_list), 'diskused': 10000000000.0, 'filewrite': 10000000.0, 'lograte': 3000000.0, 'filesopened': 500.0, 'looprecv': 100000000.0, 'events': 1000.0, 'memory': 150000000000.0, 'outsockets': 500.0, 'cpu': 1.0, 'threadcpu' : 1.0}
+
+
+resource_used = {'diskused': 0.0, 'renewable_update_time': {'fileread': 0.0, 'loopsend': 0.0, 'lograte': 0.0, 'netrecv': 0.0, 'random': 0.0, 'filewrite': 0.0, 'looprecv': 0.0, 'netsend': 0.0, 'cpu': 0.0}, 'fileread': 0.0, 'loopsend': 0.0, 'filesopened': set([]), 'lograte': 0.0, 'netrecv': 0.0, 'random': 0.0, 'insockets': set([]), 'filewrite': 0.0, 'looprecv': 0.0, 'events': 0.0, 'messport': set([]), 'memory': 0.0, 'netsend': 0.0, 'connport': set([]), 'outsockets': set([]), 'cpu': 0.0, 'threadcpu' : 1.0}
+
+def _do_nothing(*args):
+  pass
+
+def _always_true(*args):
+  return True
+
+
+# Overwrite the calls so that I don't have restrictions (the default)
+def override_restrictions():
+  """
+  <Purpose>
+    Turns off restrictions. Resource use will be unmetered after making
+    this call. (note that CPU / memory / disk space will never be metered
+    by repyhelper or repyportability)
+
+  <Arguments>
+    None.
+
+  <Exceptions>
+    None.
+
+  <Side Effects>
+    Resource use is unmetered / calls are unrestricted.
+
+  <Returns>
+    None
+  """
+  nonportable.get_resources = _do_nothing
+
+  nanny.tattle_quantity = _do_nothing
+  nanny.tattle_add_item = _do_nothing
+  nanny.tattle_remove_item = _do_nothing
+  nanny.is_item_allowed = _always_true
+  nanny.get_resource_limit = _do_nothing
+  nanny._resources_allowed_dict = default_restrictions
+  nanny._resources_consumed_dict = resource_used
+  emulfile._assert_is_allowed_filename = _do_nothing
+
+
+
+# Sets up restrictions for the program
+# THIS IS ONLY METERED FOR REPY CALLS AND DOES NOT INCLUDE CPU / MEM / DISK
+# SPACE
+def initialize_restrictions(restrictionsfn):
+  """
+  <Purpose>
+    Sets up restrictions. This allows some resources to be metered
+    despite the use of repyportability / repyhelper. CPU / memory / disk
+    space will not be metered. Call restrictions will also be enabled.
+
+  <Arguments>
+    restrictionsfn:
+      The file name of the restrictions file.
+
+  <Exceptions>
+    None.
+
+  <Side Effects>
+    Enables restrictions.
+
+  <Returns>
+    None
+  """
+  nanny.start_resource_nanny(restrictionsfn)
+
+def enable_restrictions():
+  """
+  <Purpose>
+    Turns on restrictions. There must have previously been a call to
+    initialize_restrictions(). CPU / memory / disk space will not be
+    metered. Call restrictions will also be enabled.
+
+  <Arguments>
+    None.
+
+  <Exceptions>
+    None.
+
+  <Side Effects>
+    Enables call restrictions / resource metering.
+
+  <Returns>
+    None
+  """
+  # JAC: THIS WILL NOT ENABLE CPU / MEMORY / DISK SPACE
+  nanny.tattle_quantity = oldrestrictioncalls['nanny.tattle_quantity']
+  nanny.tattle_add_item = oldrestrictioncalls['nanny.tattle_add_item']
+  nanny.tattle_remove_item = oldrestrictioncalls['nanny.tattle_remove_item']
+  nanny.is_item_allowed = oldrestrictioncalls['nanny.is_item_allowed']
+  nanny.get_resource_limit = oldrestrictioncalls['nanny.get_resource_limit']
+  nanny._resources_allowed_dict = oldrestrictioncalls['nanny._resources_allowed_dict']
+  # Use the same key the value was saved under above; a bare
+  # '_resources_consumed_dict' key would raise a KeyError here.
+  nanny._resources_consumed_dict = oldrestrictioncalls['nanny._resources_consumed_dict']
+  # Restore the private name that was saved (and overridden) above.
+  emulfile._assert_is_allowed_filename = oldrestrictioncalls['emulfile.assert_is_allowed_filename']
+
+# from virtual_namespace import VirtualNamespace
+# We need more of the module than just the VirtualNamespace
+from virtual_namespace import *
+from safe import *
+from emulmisc import *
+from emulcomm import *
+from emulfile import *
+from emultimer import *
+
+# Build the _context and usercontext dicts.
+# These will be the functions and variables in the user's namespace (along
+# with the builtins allowed by the safe module).
+usercontext = {'mycontext':{}}
+
+# Add to the user's namespace wrapped versions of the API functions we make
+# available to the untrusted user code.
+namespace.wrap_and_insert_api_functions(usercontext)
+
+# Convert the usercontext from a dict to a SafeDict
+usercontext = safe.SafeDict(usercontext)
+
+# Allow some introspection by providing a reference to the context
+usercontext["_context"] = usercontext
+usercontext["getresources"] = nonportable.get_resources
+usercontext["createvirtualnamespace"] = virtual_namespace.createvirtualnamespace
+usercontext["getlasterror"] = emulmisc.getlasterror
+_context = usercontext.copy()
+
+# This is needed because otherwise we're using the old versions of file and
+# open. We should change the names of these functions when we design
+# repy 0.2
+originalopen = open
+originalfile = file
+openfile = emulated_open
+
+# file command discontinued in repy V2
+#file = emulated_open
+
+# Create a mock copy of getresources()
+def getresources():
+  return (default_restrictions, resource_used, [])
+
+# Needed for ticket #1038.
+# `safe._builtin_destroy()` normally removes the ability to call `import`. +# it would be called inside of `createvirtualnamespace()` +# If we didn't do this, we would not be able to call `import` after +# calling `createvirtualnamespace()` +for builtin_type in dir(__builtin__): + if builtin_type not in safe._BUILTIN_OK: + safe._BUILTIN_OK.append(builtin_type) + + +def initialize_safe_module(): + """ + A helper private function that helps initialize + the safe module. + """ + + # Allow Import Errors. + safe._NODE_CLASS_OK.append("Import") + + # needed to allow primitive marshalling to be built + safe._BUILTIN_OK.append("__import__") + safe._BUILTIN_OK.append("open") + safe._BUILTIN_OK.append("eval") + + + # Allow all built-ins + for builtin_type in dir(__builtins__): + if builtin_type not in safe._BUILTIN_OK: + safe._BUILTIN_OK.append(builtin_type) + + for str_type in dir(__name__): + if str_type not in safe._STR_OK: + safe._STR_OK.append(str_type) + + safe.serial_safe_check = _do_nothing + safe._check_node = _do_nothing + + + +# Override by default! +override_restrictions() +initialize_safe_module() + + + + +# This function makes the dy_* functions available. +def add_dy_support(_context): + """ + + Enable usage of repy's dynamic library linking. This should only + be called on the module-level. + + + _context: + The context that dylink's functions should be inserted into. + + + Public functions from dylink.repy will be inserted into _context. + _context should be globals() for a module. + + + Exception is raised when a module import fails. + + + None + """ + # Add dylink support + repyhelper.translate_and_import("dylink.r2py", callfunc = 'initialize') + + # The dy_* functions are only added to the namespace after init_dylink is called. + init_dylink(_context,{}) + + original_import_module = _context['dy_import_module'] + + def _new_dy_import_module_symbols(module, callfunc="import"): + # Remember the path we are currently in. We need to change to + # this script's dir (assuming it also contains dylink.r2py and + # rest of the Repy runtime and libraries) so that dylink is + # able to link in code from the runtime. + # This is required due to Repy safety measures that inhibit + # dylink to access files outside of its directory. + # Once dylink is done, we return to the previously-current + # working dir. + previous_cwd = os.getcwd() + repyportability_dir = os.path.dirname(os.path.realpath(__file__)) + os.chdir(repyportability_dir) + + # If we are using repyportability, we want to check all pythonpath for + # the file we are looking to import. + COMMON_EXTENSIONS = ["", ".py", ".repy",".py.repy", ".pp", ".r2py"] + + # Check all combination of filepath with file extension and try to import the + # file if we have found it. + for pathdir in sys.path: + possiblefilenamewithpath = os.path.join(pathdir, module) + + # If we have found a path, then we can import the module and + # return so we do not continue to look in other paths. + if os.path.isfile(possiblefilenamewithpath): + filenamewithpath = possiblefilenamewithpath + importedmodule = original_import_module(filenamewithpath, callfunc) + os.chdir(previous_cwd) + return importedmodule + + # If we don't find the file, we just call down to dylink, and + # let it raise the appropriate error. 
+    try:
+      importedmodule = original_import_module(module, callfunc)
+      return importedmodule
+    except:
+      raise
+    finally:
+      os.chdir(previous_cwd)
+
+  _context['dy_import_module'] = _new_dy_import_module_symbols
+
+
+  # Make our own `dy_import_module_symbols` and add it to the context.
+  # It is not currently possible to use the real one (details at ticket #1046)
+  def _dy_import_module_symbols(module,new_callfunc="import"):
+    new_context = _context['dy_import_module'](module, new_callfunc)._context
+    # Copy in the new symbols into our namespace.
+    for symbol in new_context:
+      if symbol not in _context: # Prevent the imported object from destroying our namespace.
+        _context[symbol] = new_context[symbol]
+
+
+
+  _context['dy_import_module_symbols'] = _dy_import_module_symbols
+
+
+
+
diff --git a/repyportability.pyc b/repyportability.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81bc312ec93fd6f4cc2c4101d7d635d17623c065
GIT binary patch
[base85-encoded binary payload omitted: compiled bytecode for repyportability.py; generated .pyc files should not be committed]
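A usage sketch for the dylink support added above, as seen from ordinary Python (the library name librepy.repy is hypothetical; dylink.r2py and the Repy runtime are assumed to sit next to repyportability.py, as the code requires; this snippet is not part of the patch):

    from repyportability import *
    add_dy_support(locals())

    # dy_import_module_symbols() copies the library's public symbols into
    # this module's namespace.
    dy_import_module_symbols('librepy.repy')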
diff --git a/safe.py b/safe.py
new file mode 100644
index 0000000..d68417a
--- /dev/null
+++ b/safe.py
@@ -0,0 +1,698 @@
+"""An attempt at creating a safe_exec for python.
+
+This file is public domain and is not suited for any serious purpose.
+This code is not guaranteed to work. Use at your own risk!
+Beware! Trust no one!
+
+Please e-mail philhassey@yahoo.com if you find any security holes.
+
+Known limitations:
+  - Safe doesn't have any testing for timeouts/DoS. One-liners
+    like these will lock up the system: "while 1: pass", "234234**234234"
+  - Lots of (likely) safe builtins and safe AST Nodes are not allowed.
+    I suppose you can add them to the whitelist if you want them. I
+    trimmed it down as much as I thought I could get away with and still
+    have useful python code.
+  - Might not work with future versions of python - this is made with
+    python 2.4 in mind. _STR_NOT_BEGIN might have to be extended
+    in the future with more magic variable prefixes. Or you can
+    switch to conservative mode, but then even variables like "my_var"
+    won't work, which is sort of a nuisance.
+  - If you get data back from a safe_exec, don't call any functions
+    or methods - they might not be safe with __builtin__ restored
+    to its normal state. Work with them again via an additional safe_exec.
+  - The "context" sent to the functions is not tested at all. If you
+    pass in a dangerous function {'myfile':file} the code will be able
+    to call it.
+"""
+
+# Built-in Objects
+# http://docs.python.org/lib/builtin.html
+
+# AST Nodes - compiler
+# http://docs.python.org/lib/module-compiler.ast.html
+
+# Types and members - inspection
+# http://docs.python.org/lib/inspect-types.html
+# The standard type hierarchy
+# http://docs.python.org/ref/types.html
+
+# Based loosely on - Restricted "safe" eval - by Babar K. Zafar
+# (it isn't very safe, but it got me started)
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496746
+
+# Securing Python: Controlling the abilities of the interpreter
+# (or - why even trying this is likely to end in tears)
+# http://us.pycon.org/common/talkdata/PyCon2007/062/PyCon_2007.pdf
+
+# Changes
+# 2007-03-13: added test for unicode strings that contain __, etc
+# 2007-03-09: renamed safe_eval to safe_exec, since that's what it is.
+# 2007-03-09: use "exec code in context", because of test_misc_recursive_fnc
+# 2007-03-09: Removed 'type' from _BUILTIN_OK - see test_misc_type_escape
+# 2007-03-08: Cleaned up the destroy / restore mechanism, added more tests
+# 2007-03-08: Fixed how contexts work.
+# 2007-03-07: Added test for global node +# 2007-03-07: Added test for SyntaxError +# 2007-03-07: Fixed an issue where the context wasn't being reset (added test) +# 2007-03-07: Added unittest for dir() +# 2007-03-07: Removed 'isinstance', 'issubclass' from builtins whitelist +# 2007-03-07: Removed 'EmptyNode', 'Global' from AST whitelist +# 2007-03-07: Added import __builtin__; s/__builtins__/__builtin__ + +import UserDict # This is to get DictMixin +import threading # This is to get a lock +import time # This is to sleep +import subprocess # This is to start the external process +import harshexit # This is to kill the external process on timeout +import nonportable # This is to get the current runtime +import os # This is for some path manipulation +import repy_constants # This is to get our start-up directory +import safety_exceptions # This is for exception classes shared with tracebackrepy + +# Hide the DeprecationWarning for compiler +import warnings +warnings.simplefilter('ignore') +import compiler # Required for the code safety check +warnings.resetwarnings() + +import platform # This is for detecting Nokia tablets +import __builtin__ +import sys + +# Armon: This is how long we will wait for the external process +# to validate the safety of the user code before we timeout, +# and exit with an exception +# JAC: I've increased this to mitigate #744 +EVALUTATION_TIMEOUT = 15 + +if platform.machine().startswith('armv'): + # The Nokia needs more time to evaluate code safety, especially + # when under heavy loads + EVALUTATION_TIMEOUT = 200 + +_NODE_CLASS_OK = [ + 'Add', 'And', 'AssAttr', 'AssList', 'AssName', 'AssTuple', + 'Assert', 'Assign','AugAssign', 'Bitand', 'Bitor', 'Bitxor', 'Break', + 'CallFunc', 'Class', 'Compare', 'Const', 'Continue', + 'Dict', 'Discard', 'Div', 'Ellipsis', 'Expression', 'FloorDiv', + 'For', 'Function', 'Getattr', 'If', 'Keyword', + 'LeftShift', 'List', 'ListComp', 'ListCompFor', 'ListCompIf', 'Mod', + 'Module', 'Mul', 'Name', 'Node', 'Not', 'Or', 'Pass', 'Power', + 'Print', 'Printnl', 'Return', 'RightShift', 'Slice', 'Sliceobj', + 'Stmt', 'Sub', 'Subscript', 'Tuple', 'UnaryAdd', 'UnarySub', 'While', + ] +_NODE_ATTR_OK = [] +_STR_OK = ['__init__'] +# Disallow these due to the potential for encoding bugs (#982) +_STR_BAD = ['encode','decode'] +_STR_NOT_CONTAIN = ['__'] +_STR_NOT_BEGIN = ['im_','func_','tb_','f_','co_',] + +## conservative settings +#_NODE_ATTR_OK = ['flags'] +#_STR_NOT_CONTAIN = ['_'] +#_STR_NOT_BEGIN = [] + +# Checks the string safety +def _is_string_safe(token): + """ + + Checks if a string is safe based on the defined rules. + + + token: A value to check. 
+
+  <Returns>
+    True if token is safe, false otherwise
+  """
+
+  # Check if it is explicitly allowed or the wrong type
+  if type(token) is not str and type(token) is not unicode:
+    return True
+  if token in _STR_BAD:
+    return False
+  if token in _STR_OK:
+    return True
+
+  # Check all the prohibited sub-strings
+  for forbidden_substring in _STR_NOT_CONTAIN:
+    if forbidden_substring in token:
+      return False
+
+  # Check all the prohibited prefixes
+  for forbidden_prefix in _STR_NOT_BEGIN:
+    if token[:len(forbidden_prefix)] == forbidden_prefix:
+      return False
+
+  # Safe otherwise
+  return True
+
+
+def _check_node(node):
+  if node.__class__.__name__ not in _NODE_CLASS_OK:
+    raise safety_exceptions.CheckNodeException(node.lineno,node.__class__.__name__)
+  for k,v in node.__dict__.items():
+    # Don't allow the construction of unicode literals
+    if type(v) == unicode:
+      raise safety_exceptions.CheckStrException(node.lineno,k,v)
+
+    if k in _NODE_ATTR_OK: continue
+
+    # JAC: don't check doc strings for __ and the like...
+    if k == 'doc' and (node.__class__.__name__ in ['Module', 'Function', 'Class']):
+      continue
+
+
+    # Check the safety of any strings
+    if not _is_string_safe(v):
+      raise safety_exceptions.CheckStrException(node.lineno,k,v)
+
+  for child in node.getChildNodes():
+    _check_node(child)
+
+def _check_ast(code):
+  ast = compiler.parse(code)
+  _check_node(ast)
+
+_type = type
+_compile_type = _type(compile('','','exec'))
+
+def safe_type(*args, **kwargs):
+  if len(args) != 1 or kwargs:
+    raise safety_exceptions.RunBuiltinException(
+      'type() may only take exactly one non-keyword argument.')
+
+  # Fix for #1189
+  if _type(args[0]) is _type or _type(args[0]) is _compile_type:
+    # Use the safety_exceptions module imported above; no
+    # exception_hierarchy module is imported in this file.
+    raise safety_exceptions.RunBuiltinException(
+      'unsafe type() call.')
+
+  return _type(args[0])
+
+_BUILTIN_REPLACE = {
+  'type' : safe_type
+}
+
+# r = [v for v in dir(__builtin__) if v[0] != '_' and v[0] == v[0].upper()] ; r.sort() ; print r
+_BUILTIN_OK = [
+    '__debug__','quit','exit',
+
+    'ArithmeticError', 'AssertionError', 'AttributeError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False', 'FloatingPointError', 'FutureWarning', 'IOError', 'ImportError', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'None', 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError',
+
+    'abs', 'bool', 'cmp', 'complex', 'dict', 'divmod', 'filter', 'float', 'frozenset', 'hex', 'id', 'int', 'len', 'list', 'long', 'map', 'max', 'min', 'object', 'oct', 'pow', 'range', 'reduce', 'repr', 'round', 'set', 'slice', 'str', 'sum', 'tuple', 'xrange', 'zip',
+    ]
+
+#this is zope's list...
+ #in ['False', 'None', 'True', 'abs', 'basestring', 'bool', 'callable', + #'chr', 'cmp', 'complex', 'divmod', 'float', 'hash', + #'hex', 'id', 'int', 'isinstance', 'issubclass', 'len', + #'long', 'oct', 'ord', 'pow', 'range', 'repr', 'round', + #'str', 'tuple', 'unichr', 'unicode', 'xrange', 'zip']: + + +_BUILTIN_STR = [ + 'copyright','credits','license','__name__','__doc__', + ] + +def _builtin_fnc(k): + def fnc(*vargs,**kargs): + raise safety_exceptions.RunBuiltinException(k) + return fnc +_builtin_globals = None +_builtin_globals_r = None +def _builtin_init(): + global _builtin_globals, _builtin_globals_r + if _builtin_globals != None: return + _builtin_globals_r = __builtin__.__dict__.copy() + r = _builtin_globals = {} + for k in __builtin__.__dict__.keys(): + v = None + # It's important to check _BUILTIN_REPLACE before _BUILTIN_OK because + # even if the name is defined in both, there must be a security reason + # why it was supposed to be replaced, not just allowed. + if k in _BUILTIN_REPLACE: v = _BUILTIN_REPLACE[k] + elif k in _BUILTIN_OK: v = __builtin__.__dict__[k] + elif k in _BUILTIN_STR: v = '' + else: v = _builtin_fnc(k) + r[k] = v + + # Armon: Make SafeDict available + _builtin_globals["SafeDict"] = get_SafeDict + +def _builtin_destroy(): + _builtin_init() + for k,v in _builtin_globals.items(): + __builtin__.__dict__[k] = v +def _builtin_restore(): + for k,v in _builtin_globals_r.items(): + __builtin__.__dict__[k] = v + + + +# Get a lock for serial_safe_check +SAFE_CHECK_LOCK = threading.Lock() + +# Wraps safe_check to serialize calls +def serial_safe_check(code): + """ + + Serializes calls to safe_check. This is because safe_check forks a new process + which may take many seconds to return. This prevents us from forking many new + python processes. + + + code: See safe_check. + + + As with safe_check. + + + See safe_check. 
+  """
+  # Acquire the lock
+  SAFE_CHECK_LOCK.acquire()
+
+  try:
+    # Call safe check
+    return safe_check(code)
+
+  finally:
+    # Release
+    SAFE_CHECK_LOCK.release()
+
+
+def safe_check(code):
+  """Check the code to be safe."""
+  # NOTE: This code will not work in Windows Mobile due to the reliance on subprocess
+
+  # Get the path to safe_check.py by using the original start directory of python
+  path_to_safe_check = os.path.join(repy_constants.REPY_START_DIR, "safe_check.py")
+
+  # Start a safety check process, reading from the user code and outputing to a pipe we can read
+  proc = subprocess.Popen([sys.executable, path_to_safe_check],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+
+  # Write out the user code, close so the other end gets an EOF
+  proc.stdin.write(code)
+  proc.stdin.close()
+
+  # Wait for the process to terminate
+  starttime = nonportable.getruntime()
+  status = None
+
+  # Only wait up to EVALUTATION_TIMEOUT seconds before terminating
+  while status == None and (nonportable.getruntime() - starttime < EVALUTATION_TIMEOUT):
+    status = proc.poll()
+    time.sleep(0.02)
+
+  # Check if the process is still running
+  if status == None:
+    # Try to terminate the external process
+    try:
+      harshexit.portablekill(proc.pid)
+    except:
+      pass
+
+    # Raise an exception
+    raise Exception, "Evaluation of code safety exceeded timeout threshold ("+str(nonportable.getruntime() - starttime)+" seconds)"
+
+
+  # Read the output and close the pipe
+  rawoutput = proc.stdout.read()
+  proc.stdout.close()
+
+  # Interim fix for #1080: Get rid of stray debugging output on Android
+  # of the form "dlopen libpython2.6.so" and "dlopen /system/lib/libc.so",
+  # yet preserve all of the other output (including empty lines).
+
+  output = ""
+  for line in rawoutput.split("\n"):
+    # Preserve empty lines
+    if line == "":
+      output += "\n"
+      continue
+    # Suppress debug messages we know can turn up
+    wordlist = line.split()
+    if wordlist[0]=="dlopen":
+      if wordlist[-1]=="/system/lib/libc.so":
+        continue
+      if wordlist[-1].startswith("libpython") and \
+        wordlist[-1].endswith(".so"):
+        # We expect "libpython" + version number + ".so".
+        # The version number should be a string convertible to float.
+        # If it's not, raise an exception.
+        try:
+          versionstring = (wordlist[-1].replace("libpython",
+              "")).replace(".so", "")
+          junk = float(versionstring)
+        except (TypeError, ValueError):
+          raise Exception("Unexpected debug output '" + line +
+              "' while evaluating code safety!")
+    else:
+      output += line + "\n"
+
+  # Strip off the last newline character we added
+  output = output[0:-1]
+
+  # Check the output, None is success, else it is a failure
+  if output == "None":
+    return True
+
+  # If there is no output, this is a fatal error condition
+  elif output == "":
+    raise Exception, "Fatal error while evaluating code safety!"
+
+  else:
+    # Raise the error from the output
+    raise safety_exceptions.SafeException, output
+
+
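+# A minimal usage sketch (not part of the original module; assumes this
+# file is importable as `safe` alongside safety_exceptions):
+#
+#   import safe
+#   import safety_exceptions
+#   try:
+#     safe.safe_exec("print 'hello'")   # static check, then restricted exec
+#   except safety_exceptions.SafeException:
+#     pass   # the static safety check rejected the code
+
+# Have the builtins already been destroyed?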
+BUILTINS_DESTROYED = False + +def safe_run(code,context=None): + """Exec code with only safe builtins on.""" + global BUILTINS_DESTROYED + if context == None: context = {} + + # Destroy the builtins if needed + if not BUILTINS_DESTROYED: + BUILTINS_DESTROYED = True + _builtin_destroy() + + try: + #exec code in _builtin_globals,context + context['__builtins__'] = _builtin_globals + exec code in context + #_builtin_restore() + except: + #_builtin_restore() + raise + +def safe_exec(code,context = None): + """Check the code to be safe, then run it with only safe builtins on.""" + serial_safe_check(code) + safe_run(code,context) + + +# Functional constructor for SafeDict +def get_SafeDict(*args,**kwargs): + return SafeDict(*args,**kwargs) + +# Safe dictionary, which prohibits "bad" keys +class SafeDict(UserDict.DictMixin): + """ + + A dictionary implementation which prohibits "unsafe" keys + from being set or get. + """ + + def __init__(self,from_dict=None): + # Create the underlying dictionary + self.__under__ = {} + + # Break if we are done... + if from_dict is None: + return + if type(from_dict) is not dict and not isinstance(from_dict,SafeDict): + return + + # If we are given a dict, try to copy its keys + for key,value in from_dict.items(): + # Skip __builtins__ and __doc__ since safe_run/python inserts that + if key in ["__builtins__","__doc__"]: + continue + + # Check the key type + if type(key) is not str and type(key) is not unicode: + raise TypeError, "'SafeDict' keys must be of string type!" + + # Check if the key is safe + if _is_string_safe(key): + self.__under__[key] = value + + # Throw an exception if the key is unsafe + else: + raise ValueError, "Unsafe key: '"+key+"'" + + # Allow getting items + def __getitem__(self,key): + if type(key) is not str and type(key) is not unicode: + raise TypeError, "'SafeDict' keys must be of string type!" + if not _is_string_safe(key): + raise ValueError, "Unsafe key: '"+key+"'" + + return self.__under__.__getitem__(key) + + # Allow setting items + def __setitem__(self,key,value): + if type(key) is not str and type(key) is not unicode: + raise TypeError, "'SafeDict' keys must be of string type!" + if not _is_string_safe(key): + raise ValueError, "Unsafe key: '"+key+"'" + + return self.__under__.__setitem__(key,value) + + # Allow deleting items + def __delitem__(self,key): + if type(key) is not str and type(key) is not unicode: + raise TypeError, "'SafeDict' keys must be of string type!" + if not _is_string_safe(key): + raise ValueError, "Unsafe key: '"+key+"'" + + return self.__under__.__delitem__(key) + + # Allow checking if a key is set + def __contains__(self,key): + if type(key) is not str and type(key) is not unicode: + raise TypeError, "'SafeDict' keys must be of string type!" 
+ if not _is_string_safe(key): + raise ValueError, "Unsafe key: '"+key+"'" + + return self.__under__.__contains__(key) + + # Return the key set + def keys(self): + # Get the keys from the underlying dict + keys = self.__under__.keys() + + # Filter out the unsafe keys + safe_keys = [] + + for key in keys: + if _is_string_safe(key): + safe_keys.append(key) + + # Return the safe keys + return safe_keys + + + # allow us to be printed + # this gets around the __repr__ infinite loop issue ( #918 ) for simple cases + # It seems unlikely this is adequate for more complex cases (like safedicts + # that refer to each other) + def __repr__(self): + newdict = {} + for safekey in self.keys(): + if self.__under__[safekey] == self: + newdict[safekey] = newdict + else: + newdict[safekey] = self.__under__[safekey] + return newdict.__repr__() + + # Allow a copy of us + def copy(self): + # Create a new instance + copy_inst = SafeDict(self.__under__) + + # Return a new instance + return copy_inst + + # Make our fields read-only + # This means __getattr__ can do its normal thing, but any + # setters need to be overridden to prohibit adding/deleting/updating + + def __setattr__(self,name,value): + # Allow setting __under__ on initialization + if name == "__under__" and name not in self.__dict__: + self.__dict__[name] = value + return + + raise TypeError,"'SafeDict' attributes are read-only!" + + def __delattr__(self,name): + raise TypeError,"'SafeDict' attributes are read-only!" + + +if __name__ == '__main__': + import unittest + + class TestSafe(unittest.TestCase): + def test_check_node_import(self): + self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"import os") + def test_check_node_from(self): + self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"from os import *") + def test_check_node_exec(self): + self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"exec 'None'") + def test_check_node_raise(self): + self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"raise Exception") + def test_check_node_global(self): + self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"global abs") + + def test_check_str_x(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"x__ = 1") + def test_check_str_str(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"x = '__'") + def test_check_str_class(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"None.__class__") + def test_check_str_func_globals(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"def x(): pass; x.func_globals") + def test_check_str_init(self): + safe_exec("def __init__(self): pass") + def test_check_str_subclasses(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"object.__subclasses__") + def test_check_str_properties(self): + code = """ +class X(object): + def __get__(self,k,t=None): + 1/0 +""" + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,code) + def test_check_str_unicode(self): + self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"u'__'") + + def test_run_builtin_open(self): + self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"open('test.txt','w')") + def test_run_builtin_getattr(self): + self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"getattr(None,'x')") + def test_run_builtin_abs(self): + safe_exec("abs(-1)") + def test_run_builtin_open_fnc(self): + def test(): + f = open('test.txt','w') + 
self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"test()",{'test':test})
+    def test_run_builtin_open_context(self):
+      #this demonstrates how python jumps into some mystical
+      #restricted mode at this point .. causing this to throw
+      #an IOError.  a bit strange, if you ask me.
+      self.assertRaises(IOError,safe_exec,"test('test.txt','w')",{'test':open})
+    def test_run_builtin_type_context(self):
+      #however, even though this is also a very dangerous function
+      #python's mystical restricted mode doesn't throw anything.
+      safe_exec("test(1)",{'test':type})
+    def test_run_builtin_dir(self):
+      self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"dir(None)")
+
+    def test_run_exception_div(self):
+      self.assertRaises(ZeroDivisionError,safe_exec,"1/0")
+    def test_run_exception_i(self):
+      self.assertRaises(ValueError,safe_exec,"(-1)**0.5")
+
+    def test_misc_callback(self):
+      self.value = None
+      def test(): self.value = 1
+      safe_exec("test()", {'test':test})
+      self.assertEqual(self.value, 1)
+    def test_misc_safe(self):
+      self.value = None
+      def test(v): self.value = v
+      code = """
+class Test:
+  def __init__(self,value):
+    self.x = value
+    self.y = 4
+  def run(self):
+    for n in xrange(0,34):
+      self.x += n
+      self.y *= n
+    return self.x+self.y
+b = Test(value)
+r = b.run()
+test(r)
+"""
+      safe_exec(code,{'value':3,'test':test})
+      self.assertEqual(self.value, 564)
+
+    def test_misc_context_reset(self):
+      #test that the local context is reset
+      safe_exec("abs = None")
+      safe_exec("abs(-1)")
+      safe_run("abs = None")
+      safe_run("abs(-1)")
+
+    def test_misc_syntax_error(self):
+      self.assertRaises(SyntaxError,safe_exec,"/")
+
+    def test_misc_context_switch(self):
+      self.value = None
+      def test(v): self.value = v
+      safe_exec("""
+def test2():
+  open('test.txt','w')
+test(test2)
+""",{'test':test})
+      self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"test()",{'test':self.value})
+
+    def test_misc_context_junk(self):
+      #test that stuff isn't being added into *my* context
+      #except what i want in it..
+      c = {}
+      safe_exec("b=1",c)
+      self.assertEqual(c['b'],1)
+
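+    # A sketch (an editorial addition, not an original test) of how SafeDict
+    # surfaces inside the sandbox: safe_run publishes it through the replaced
+    # builtins, so sandboxed code can do, e.g.:
+    #
+    #   d = SafeDict()
+    #   d['ok'] = 1          # plain string keys are fine
+    #   d['__class__'] = 1   # raises ValueError: unsafe key
+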
+    def test_misc_context_later(self):
+      #honestly, i'd rec that people don't do this, but
+      #at least we've got it covered ...
+      c = {}
+      safe_exec("def test(): open('test.txt','w')",c)
+      self.assertRaises(safety_exceptions.RunBuiltinException,c['test'])
+
+    #def test_misc_test(self):
+      #code = "".join(open('test.py').readlines())
+      #safe_check(code)
+
+    def test_misc_builtin_globals_write(self):
+      #check that a user can't modify the special _builtin_globals stuff
+      safe_exec("abs = None")
+      self.assertNotEqual(_builtin_globals['abs'],None)
+
+    #def test_misc_builtin_globals_used(self):
+      ##check that the same builtin globals are always used
+      #c1,c2 = {},{}
+      #safe_exec("def test(): pass",c1)
+      #safe_exec("def test(): pass",c2)
+      #self.assertEqual(c1['test'].func_globals,c2['test'].func_globals)
+      #self.assertEqual(c1['test'].func_globals,_builtin_globals)
+
+    def test_misc_builtin_globals_used(self):
+      #check that the same builtin globals are always used
+      c = {}
+      safe_exec("def test1(): pass",c)
+      safe_exec("def test2(): pass",c)
+      self.assertEqual(c['test1'].func_globals,c['test2'].func_globals)
+      self.assertEqual(c['test1'].func_globals['__builtins__'],_builtin_globals)
+      self.assertEqual(c['__builtins__'],_builtin_globals)
+
+    def test_misc_type_escape(self):
+      #tests that 'type' isn't allowed anymore
+      #with type defined, you could create magical classes like this:
+      code = """
+def delmethod(self): 1/0
+foo=type('Foo', (object,), {'_' + '_del_' + '_':delmethod})()
+foo.error
+"""
+      try:
+        self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,code)
+      finally:
+        pass
+
+    def test_misc_recursive_fnc(self):
+      code = "def test():test()\ntest()"
+      self.assertRaises(RuntimeError,safe_exec,code)
+
+
+  unittest.main()
+
+  #safe_exec('print locals()')
+
diff --git a/safe.pyc b/safe.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b6d030ba2810e80a11704bc654432713130b603
Binary files /dev/null and b/safe.pyc differ
diff --git a/send_gmail.pyc b/send_gmail.pyc
new file mode 100644
Binary files /dev/null and b/send_gmail.pyc differ
diff --git a/send_gmail.py~ b/send_gmail.py~
new file mode 100644
--- /dev/null
+++ b/send_gmail.py~
+"""
+<Program Name>
+  send_gmail.py
+
+<Started>
+  December 17, 2008
+
+<Author>
+  ivan@cs.washington.edu
+  Ivan Beschastnikh
+
+<Purpose>
+  Sends an email using an existing gmail account
+
+<Usage>
+  This script can be run from the command line to generate a test
+  email. The command line usage is:
+  $ python send_gmail.py [gmail_user] [gmail_pwd] [to] [subj] [body] [attach]
+  Where all the arguments are strings and attach is a path to a
+  readable file or is missing (for no attachment).
+
+  As an import, this file should be used as follows:
+
+  First, initialize the global username and password variables by
+  calling init_gmail(gmail_user,gmail_pwd).
+
+  Second, use send_gmail(to,subject,text,attach) to send emails.
+"""
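+# A minimal usage sketch (an editorial addition, not part of the original
+# module; assumes a gmail info file that init_gmail() can read):
+#
+#   import send_gmail
+#   ok, explanation = send_gmail.init_gmail()
+#   if not ok:
+#       print explanation
+#   else:
+#       send_gmail.send_gmail("someone@example.com", "subject", "body", "")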
+""" + +import os +import traceback +import sys + +import smtplib +from email.MIMEMultipart import MIMEMultipart +from email.MIMEBase import MIMEBase +from email.MIMEText import MIMEText +from email import Encoders + +GMAIL_USER="" +GMAIL_PWD="" + +gmail_file_name = "/home/monzum/monitor_script/seattle_gmail_info" + +def init_gmail(gmail_user="", gmail_pwd="", gmail_user_shvarname="GMAIL_USER", gmail_pwd_shvarname="GMAIL_PWD"): + """ + + Sets up the global variables GMAIL_USER and GMAIL_PWD for use by send_gmail() + + + gmail_user (optional) : + gmail username to use + gmail_pwd (optional): + gmail password for gmail_user + gmail_user_shvarname (optional): + if gmail_user is "" then this specifies the shell + variable name to use for extracting the gmail username + gmail_pwd_shvarname (optional): + if gmail_pwd is "" then this specifies the shell + variable name to use for extracting the gmail password + + None + + + Sets GMAIL_USER and GMAIL_PWD global variables + + + (True, "") on success and (False, explanation) on failure, + where explanation is a string explaining what went wrong + """ + global GMAIL_USER + global GMAIL_PWD + + gmail_user_info = {} + + # Get full file path + file_path = os.path.join(os.getcwd(), gmail_file_name) + + if os.path.isfile(file_path): + gmail_file_object = open(file_path, 'r') + print 'read file ' + file_path + gmail_user_info = eval(gmail_file_object.read()) + + GMAIL_USER = gmail_user_info['GMAIL_USER'] + GMAIL_PWD = gmail_user_info['GMAIL_PWD'] + print 'loaded gmail info' + else: + return False, "Make sure the file '" + gmail_file_name + "' is in the current directory" + + return True, "" + + +def send_gmail(to, subject, text, attach): + """ + + Sends an email to 'to' with subject 'subject' with text 'test' + and attachment filename 'attach'. Uses the gmail account + specified by GMAIL_USER and GMAIL_PWD global variables. + + GMAIL_USER and GMAIL_PWD must be set up with init_gmail() + prior to calling this function. + + + to: + who to send the email to, an email address string + subject: + the string subject line of the email + text: + the string text body of the email + attach: + the filename to attach to the message + + + Not sure? + + + Sends an email through gmail to a recipient. + + + (True,"") on succes, (False,explanation) on failure, where + explanation contains the string explaining the failure + """ + if GMAIL_USER is "": + return False, "GMAIL_USER not set, did you run init_gmail()?" + if GMAIL_PWD is "": + return False, "GMAIL_PWD not set, did you run init_gmail()?" + + msg = MIMEMultipart() + msg['From'] = GMAIL_USER + msg['To'] = to + msg['Subject'] = subject + + msg.attach(MIMEText(text)) + + if attach != "": + part = MIMEBase('application', 'octet-stream') + part.set_payload(open(attach, 'rb').read()) + Encoders.encode_base64(part) + part.add_header('Content-Disposition', + 'attachment; filename="%s"' % os.path.basename(attach)) + msg.attach(part) + + mailServer = smtplib.SMTP("smtp.gmail.com", 587) + mailServer.ehlo() + mailServer.starttls() + mailServer.ehlo() + + try: + mailServer.login(GMAIL_USER, GMAIL_PWD) + except smtplib.SMTPAuthenticationError, (code,resp): + return False, str(code) + " " + str(resp) + + mailServer.sendmail(GMAIL_USER, to, msg.as_string()) + + # Should be mailServer.quit(), but that crashes... 
+  mailServer.close()
+  return True, ""
+
+if __name__ == "__main__":
+  if len(sys.argv) != 6 and len(sys.argv) != 7:
+    print "usage:", sys.argv[0], "[gmail_user] [gmail_pwd] [to] [subj] [body] [optional:attach]"
+    sys.exit(0)
+
+
+  gmail_user = sys.argv[1]
+  gmail_pwd = sys.argv[2]
+  to = sys.argv[3]
+  subj = sys.argv[4]
+  body = sys.argv[5]
+
+  if len(sys.argv) == 6:
+    attach = ""
+  else:
+    attach = sys.argv[6]
+
+  success, explain_str = init_gmail(gmail_user, gmail_pwd)
+  if not success:
+    print explain_str
+    sys.exit(0)
+
+  success, explain_str = send_gmail(to,subj,body,attach)
+  if not success:
+    print explain_str
+    sys.exit(0)
+
+  print "sent"
+
diff --git a/statusstorage.py b/statusstorage.py
new file mode 100755
index 0000000..55379a1
--- /dev/null
+++ b/statusstorage.py
@@ -0,0 +1,111 @@
+"""
+   Author: Justin Cappos
+
+   Start Date: 14 Sept 2008
+
+   Description:
+
+   This module stores status information about the sandbox.   Use "read_status"
+   and "write_status" to set and check the status...
+
+   This module works by creating a file with a name that indicates the status.
+   The order of operations is: find old file name(s), write new file, delete
+   old file(s).   File names contain a timestamp so that one can tell when it
+   was last updated.   The actual format is: "prefix-status-timestamp".
+
+"""
+
+# to store the current time...
+import time
+
+# needed to get a lock
+import threading
+
+# needed for listdir...
+import os
+
+# To allow access to a real fileobject
+# call type...
+myfile = file
+
+statusfilenameprefix = None
+
+# This prevents writes to the nanny's status information after we want to stop
+statuslock = threading.Lock()
+
+def init(sfnp):
+  global statusfilenameprefix
+  statusfilenameprefix = sfnp
+
+
+# Write out a status that can be read by another process...
+def write_status(status, mystatusfilenameprefix=None):
+
+  if not mystatusfilenameprefix:
+    mystatusfilenameprefix = statusfilenameprefix
+
+  # nothing set, nothing to do...
+  if not mystatusfilenameprefix:
+    return
+
+  mystatusdir = os.path.dirname(mystatusfilenameprefix)
+  if mystatusdir == '':
+    mystatusdir = './'
+  else:
+    mystatusdir = mystatusdir+'/'
+
+  # BUG: Is getting a directory list atomic wrt file creation / deletion?
+  # get the current file list...
+  # Fix.   Need to prepend the directory name we're writing into...
+  existingfiles = os.listdir(mystatusdir)
+
+  timestamp = time.time()
+
+  # write the file
+  myfile(mystatusfilenameprefix+"-"+status+"-"+str(timestamp),"w").close()
+
+  # remove the old files...
+  for filename in existingfiles:
+    if len(filename.split('-')) == 3 and filename.split('-')[0] == os.path.basename(mystatusfilenameprefix):
+      try:
+        os.remove(mystatusdir+filename)
+      except OSError, e:
+        if e[0] == 2:
+          # file not found, let's assume another instance removed it...
+          continue
+
+        # otherwise, let's re-raise the error
+        raise
+
+
+def read_status(mystatusfilenameprefix=None):
+
+  if not mystatusfilenameprefix:
+    mystatusfilenameprefix = statusfilenameprefix
+
+  # BUG: is getting a dir list atomic wrt file creation / deletion?
+  # get the current file list...
+  # Fix.   Need to prepend the directory name we're writing into...
+  if os.path.dirname(mystatusfilenameprefix):
+    existingfiles = os.listdir(os.path.dirname(mystatusfilenameprefix))
+  else:
+    existingfiles = os.listdir('.')
+
+  latesttime = 0
+  lateststatus = None
+
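+  # A worked example of the naming scheme (an illustrative note, not from
+  # the original source): a prefix of "v12" and a status of "Started" yield
+  # a file named roughly "v12-Started-1410112345.67"; split('-',2) then
+  # recovers the status "Started" and the float timestamp 1410112345.67.
+
+  # find the newest status update...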
+  for filename in existingfiles:
+    # match against the basename, since write_status creates the file under
+    # the directory part of the prefix
+    if filename.split('-')[0] == os.path.basename(mystatusfilenameprefix):
+      thisstatus = filename.split('-',2)[1]
+      thistime = float(filename.split('-',2)[2])
+
+      # is this the latest?
+      if thistime > latesttime:
+        latesttime = thistime
+        lateststatus = thisstatus
+
+  return (lateststatus, latesttime)
+
+
+
diff --git a/statusstorage.pyc b/statusstorage.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45ae5c574aa9729abd47171c5889b66528a5889f
Binary files /dev/null and b/statusstorage.pyc differ
diff --git a/tracebackrepy.py b/tracebackrepy.py
new file mode 100755
index 0000000..d2dd446
--- /dev/null
+++ b/tracebackrepy.py
@@ -0,0 +1,230 @@
+"""
+Author: Justin Cappos
+
+Start Date: September 17th, 2008
+
+Description:
+Module for printing clean tracebacks.   It takes the python traceback and
+makes the output look nicer so the programmer can tell what is happening...
+
+"""
+
+
+# we'll print our own exceptions
+import traceback
+# This needs hasattr.   I'll allow it...
+traceback.hasattr = hasattr
+
+# and don't want traceback to use linecache because linecache uses open
+import fakelinecache
+traceback.linecache = fakelinecache
+
+# Need to be able to reference the last traceback...
+import sys
+
+# Used to determine whether or not we use the service logger to log internal
+# errors.   Defaults to false. -Brent
+servicelog = False
+
+# this is the directory where the node manager resides.   We will use this
+# when deciding where to write our service log.
+logdirectory = None
+
+
+# We need the service logger to log internal errors -Brent
+import servicelogger
+
+# We need to be able to do a harshexit on internal errors
+import harshexit
+
+# I'd like to know if it's a "safety concern" so I can tell the user...
+# I'll import the module so I can check the exceptions
+import safety_exceptions
+
+# needed to get the PID
+import os
+
+# Armon: This set contains all the modules which are black-listed
+# from the traceback, so that if there is an exception, they will
+# not appear in the stack.
+TB_SKIP_MODULES = ["repy.py","safe.py","virtual_namespace.py","namespace.py","emulcomm.py",
+          "emultimer.py","emulmisc.py","emulfile.py","nonportable.py","socket.py"]
+
+
+# sets the user's file name.
+# also sets whether or not the servicelogger is used. -Brent
+def initialize(useservlog=False, logdir = '.'):
+  global servicelog
+  global logdirectory
+  servicelog = useservlog
+  logdirectory = logdir
+
+
+# Public: this prints the previous exception in a readable way...
+def handle_exception():
+  """
+  This is an example traceback:
+  ---
+  Uncaught exception! Following is a full traceback, and a user traceback.
+  The user traceback excludes non-user modules. The most recent call is displayed last.
+
+  Full debugging traceback:
+    "repy.py", line 191, in main
+    "/Users/adadgar/Projects/seattle/trunk/test/virtual_namespace.py", line 116, in evaluate
+    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 304, in safe_run
+    "dylink.repy", line 472, in <module>
+    "dylink.repy", line 360, in dylink_dispatch
+    "dylink.repy", line 455, in evaluate
+    "/Users/adadgar/Projects/seattle/trunk/test/namespace.py", line 1072, in __do_func_call
+    "/Users/adadgar/Projects/seattle/trunk/test/namespace.py", line 1487, in wrapped_function
+    "/Users/adadgar/Projects/seattle/trunk/test/virtual_namespace.py", line 116, in evaluate
+    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 304, in safe_run
+    "testxmlrpc_common", line 254, in <module>
+    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 174, in fnc
+
+  User traceback:
+    "dylink.repy", line 472, in <module>
+    "dylink.repy", line 360, in dylink_dispatch
+    "dylink.repy", line 455, in evaluate
+    "testxmlrpc_common", line 254, in <module>
+
+  Unsafe call: ('__import__',)
+  ---
+  """
+
+  # exc_info() gives the traceback (see the traceback module for info)
+  exceptiontype, exceptionvalue, exceptiontraceback = sys.exc_info()
+
+  # We store a full traceback, and a "filtered" user traceback to help the user
+  full_tb = ""
+  filtered_tb = ""
+
+  for tracebackentry in traceback.extract_tb(exceptiontraceback):
+    # the entry format is (filename, lineno, modulename, linedata)
+    # linedata is always empty because we prevent the linecache from working
+    # for safety reasons...
+
+    # Check that this module is not black-listed
+    module = tracebackentry[0]
+    skip = False
+
+    # Check if any of the forbidden modules are a substring of the module name
+    # e.g. if the name is /home/person/seattle/repy.py, we want to see that repy.py
+    # and skip this frame.
+    for forbidden in TB_SKIP_MODULES:
+      if forbidden in module:
+        skip = True
+        break
+
+    # Construct a frame of output
+    stack_frame = '   "'+tracebackentry[0]+'", line '+str(tracebackentry[1])+", in "+str(tracebackentry[2])+"\n"
+
+    # Always add to the full traceback
+    full_tb += stack_frame
+
+    # If this module is not blacklisted, add it to the filtered traceback
+    if not skip:
+      filtered_tb += stack_frame
+
+
+  # Print some general info
+  print >> sys.stderr, "---\nUncaught exception! Following is a full traceback, and a user traceback.\n" \
+        "The user traceback excludes non-user modules. The most recent call is displayed last.\n"
+
+  # Print the full traceback first
+  print >> sys.stderr, "Full debugging traceback:\n",full_tb
+
+  print >> sys.stderr, "User traceback:\n",filtered_tb
+
+
+  # When I try to print an Exception object, I get:
+  # "<type 'exceptions.Exception'>".   I'm going to look for this and produce
+  # more sensible output if it happens.
+
+  if exceptiontype is safety_exceptions.CheckNodeException:
+    print >> sys.stderr, "Unsafe call with line number / type:",str(exceptionvalue)
+
+  elif exceptiontype is safety_exceptions.CheckStrException:
+    print >> sys.stderr, "Unsafe string on line number / string:",exceptionvalue
+
+  elif exceptiontype is safety_exceptions.RunBuiltinException:
+    print >> sys.stderr, "Unsafe call:",exceptionvalue
+
+  elif str(exceptiontype)[0] == '<':
+    print >> sys.stderr, "Exception (with "+str(exceptiontype)[1:-1]+"):", exceptionvalue
+  else:
+    print >> sys.stderr, "Exception (with type "+str(exceptiontype)+"):", exceptionvalue
+
+  # Print another line so that the end of the output is clear
+  print >> sys.stderr, "---"
+
+
+def handle_internalerror(error_string, exitcode):
+  """
+  <Author>
+    Brent Couvrette
+
+  <Purpose>
+    When an internal error happens in repy it should be handled differently
+    than normal exceptions, because internal errors could possibly lead to
+    security vulnerabilities if we aren't careful.   Therefore when an internal
+    error occurs, we will not return control to the user's program.   Instead
+    we will log the error to the service log if available, then terminate.
+
+  <Arguments>
+    error_string - The error string to be logged if logging is enabled.
+    exitcode - The exit code to be used in the harshexit call.
+
+  <Exceptions>
+    None
+
+  <Side Effects>
+    The program will exit.
+
+  <Returns>
+    Shouldn't return because harshexit will always be called.
+  """
+
+  try:
+    print >> sys.stderr, "Internal Error"
+    if not servicelog:
+      # If the service log is disabled, lets just exit.
+      harshexit.harshexit(exitcode)
+    else:
+      # Internal errors should not be given to the user's code to be caught,
+      # so we print the exception to the service log and exit. -Brent
+      exceptionstring = "[INTERNAL ERROR] " + error_string + '\n'
+      for line in traceback.format_stack():
+        exceptionstring = exceptionstring + line
+
+      # This magic is determining what directory we are in, so that can be
+      # used as an identifier in the log.   In a standard deployment this
+      # should be of the form vXX where XX is the vessel number.   We don't
+      # want any exceptions here preventing us from exiting, so we will
+      # wrap this in a try-except block, and use a default value if we fail.
+      try:
+        identifier = os.path.basename(os.getcwd())
+      except:
+        # We use a blank except because if we don't, the user might be able to
+        # handle the exception, which is unacceptable on internal errors.  Using
+        # the current pid should avoid any attempts to write to the same file at
+        # the same time.
+        identifier = str(os.getpid())
+      else:
+        if identifier == '':
+          # If the identifier is blank, use the PID.
+          identifier = str(os.getpid())
+
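+      # To make the flow concrete (an illustrative note, not original
+      # source): a crash while running inside /home/seattle/v12 reaches
+      # servicelogger.multi_process_log(exceptionstring, "v12", logdirectory)
+      # below; if even the getcwd() lookup fails, the PID string stands in
+      # as the identifier instead.
+
+      # Again we want to ensure that even if we fail to log, we still exit.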
+      try:
+        servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
+      except Exception, e:
+        # if an exception occurs, log it (unfortunately, to the user's log)
+        print 'Inner abort of servicelogger'
+        print e,type(e)
+        traceback.print_exc()
+      finally:
+        harshexit.harshexit(exitcode)
+
+  except Exception, e:
+    # if an exception occurs, log it (unfortunately, to the user's log)
+    print 'Outer abort of servicelogger'
+    print e,type(e)
+    traceback.print_exc()
+  finally:
+    harshexit.harshexit(842)
diff --git a/tracebackrepy.pyc b/tracebackrepy.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abe291ba1f45443a116194b11428c5458430ff3a
Binary files /dev/null and b/tracebackrepy.pyc differ

From: aot221
Date: Sun, 28 Sep 2014 14:14:32 -0400
Subject: [PATCH 21/33] Revert "Add exception for socket timeout bug"

This reverts commit ce451874058e5cb14812ee052ab381158326c483.
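The reverted handler caught socket.timeout around session_recvmessage()
only to print "Socket timed out '"+response+"'"; when the timeout fired
inside session_recvmessage itself, `response` was never bound, so the
handler could raise a NameError of its own. If such a handler is ever
reintroduced, a safer shape (a sketch against the same code, assuming
socket.timeout is indeed what the timeout_openconn socket raises here)
would be:

    try:
        session_sendmessage(sockobj, "PUT|" + str(key) + "|" + str(value) + "|" + str(ttlval))
        response = session_recvmessage(sockobj)
    except socket.timeout:
        raise Exception("Centralized announce timed out contacting " + servername)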
--- DORadvertise.repy | 216 -------- DORadvertise_repy.py | 234 -------- DORadvertise_repy.pyc | Bin 6028 -> 0 bytes centralizedadvertise.repy | 5 +- centralizedadvertise.repy~ | 55 -- dorputget_new.py | 155 ------ harshexit.py | 174 ------ harshexit.pyc | Bin 2776 -> 0 bytes integrationtestlib.pyc | Bin 3181 -> 0 bytes irc_seattlebot.pyc | Bin 2519 -> 0 bytes nonportable.py | 1028 ------------------------------------ nonportable.pyc | Bin 18576 -> 0 bytes portability | 1 - repy_constants.py | 48 -- repy_constants.pyc | Bin 745 -> 0 bytes repy_v1 | 1 - repyhelper.py | 495 ----------------- repyhelper.pyc | Bin 14404 -> 0 bytes repyportability.py | 313 ----------- repyportability.pyc | Bin 8156 -> 0 bytes safe.py | 698 ------------------------ safe.pyc | Bin 24838 -> 0 bytes seattle_gmail_info | 1 - seattle_gmail_info~ | 1 - send_gmail.pyc | Bin 5653 -> 0 bytes send_gmail.py~ | 187 ------- statusstorage.py | 111 ---- statusstorage.pyc | Bin 2406 -> 0 bytes tracebackrepy.py | 230 -------- tracebackrepy.pyc | Bin 5591 -> 0 bytes 30 files changed, 1 insertion(+), 3952 deletions(-) delete mode 100644 DORadvertise.repy delete mode 100644 DORadvertise_repy.py delete mode 100644 DORadvertise_repy.pyc delete mode 100644 centralizedadvertise.repy~ delete mode 100644 dorputget_new.py delete mode 100644 harshexit.py delete mode 100644 harshexit.pyc delete mode 100644 integrationtestlib.pyc delete mode 100644 irc_seattlebot.pyc delete mode 100755 nonportable.py delete mode 100644 nonportable.pyc delete mode 160000 portability delete mode 100644 repy_constants.py delete mode 100644 repy_constants.pyc delete mode 160000 repy_v1 delete mode 100644 repyhelper.py delete mode 100644 repyhelper.pyc delete mode 100644 repyportability.py delete mode 100644 repyportability.pyc delete mode 100644 safe.py delete mode 100644 safe.pyc delete mode 100644 seattle_gmail_info delete mode 100644 seattle_gmail_info~ delete mode 100644 send_gmail.pyc delete mode 100644 send_gmail.py~ delete mode 100755 statusstorage.py delete mode 100644 statusstorage.pyc delete mode 100755 tracebackrepy.py delete mode 100644 tracebackrepy.pyc diff --git a/DORadvertise.repy b/DORadvertise.repy deleted file mode 100644 index a7c5944..0000000 --- a/DORadvertise.repy +++ /dev/null @@ -1,216 +0,0 @@ -""" -Author: Conrad Meyer - -Start Date: Wed Dec 9 2009 - -Description: -Advertisements to the Digital Object Registry run by CNRI. - -""" - - - - -include sockettimeout.repy -include httpretrieve.repy -include xmlparse.repy - - - - -DORadvertise_FORM_LOCATION = "http://geni.doregistry.org/SeattleGENI/HashTable" - - - - -class DORadvertise_XMLError(Exception): - """ - Exception raised when the XML recieved from the Digital Object Registry - server does not match the structure we expect. - """ - pass - - - - -class DORadvertise_BadRequest(Exception): - """ - Exception raised when the Digital Object Registry interface indigates we - have made an invalid request. - """ - - - def __init__(self, errno, errstring): - self.errno = errno - self.errstring = errstring - Exception.__init__(self, "Bad DOR request (%s): '%s'" % (str(errno), errstring)) - - - - -def DORadvertise_announce(key, value, ttlval, timeout=None): - """ - - Announce a (key, value) pair to the Digital Object Registry. - - - key: - The new key the value should be stored under. - - value: - The value to associate with the given key. - - ttlval: - The length of time (in seconds) to persist this key <-> value - association in DHT. 
- - timeout: - The number of seconds to spend on this operation before failing - early. - - - xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. - DORadvertise_XMLError if the xml response structure does not correspond - to what we expect. - DORadvertise_BadRequest if the response indicates an error. - Any exception httpretrieve_get_string() throws (including timeout errors). - - - The key <-> value association gets stored in openDHT for a while. - - - None. - """ - - post_params = {'command': 'announce', 'key': key, 'value': value, - 'lifetime': str(int(ttlval))} - - _DORadvertise_command(post_params, timeout=timeout) - - return None - - - - - -def DORadvertise_lookup(key, maxvals=100, timeout=None): - """ - - Retrieve a stored value from the Digital Object Registry. - - - key: - The key the value is stored under. - - maxvals: - The maximum number of values stored under this key to - return to the caller. - - timeout: - The number of seconds to spend on this operation before failing - early. If not specified, the default is set to the default - timeout value for the http library (30 seconds). - - - xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. - DORadvertise_XMLError if the xml response structure does not correspond - to what we expect. - DORadvertise_BadRequest if the response indicates an error. - Any exception httpretrieve_get_string() throws (including timeout errors). - - - None. - - - The value stored in the Digital Object Registry at key. - """ - - post_params = {'command': 'lookup', 'key': key, 'maxvals': str(maxvals)} - - return _DORadvertise_command(post_params, timeout=timeout) - - - -def _DORadvertise_command(parameters, timeout=None): - # Internal helper function; calls the remote command, and returns - # the results we can glean from it. - - # If there is a timeout, use it! - if timeout != None: - post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ - postdata=parameters, timeout=timeout, \ - httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) - else: - post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ - postdata=parameters, \ - httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) - - - # Parse the result to check for success. Throw several exceptions to - # ensure the XML we're reading makes sense. - xmltree = xmlparse_parse(post_result) - - if xmltree.tag_name != "HashTableService": - raise DORadvertise_XMLError( - "Root node error. Expected: 'HashTableService', " + - "got: '%s'" % xmltree.tag_name) - - if xmltree.children is None: - raise DORadvertise_XMLError("Root node contains no children nodes.") - - # We expect to get an error code, an error string, and possibly some - # values from the server. - error_msg = None - error = None - values = None - - numxmlchildren = len(xmltree.children) - if numxmlchildren not in [2, 3]: - raise DORadvertise_XMLError("Root XML node contains inappropriate " + \ - "number of child nodes.") - - for xmlchild in xmltree.children: - # Read the numeric error code. - if xmlchild.tag_name == "status" and xmlchild.content is not None: - if error is not None: - raise DORadvertise_XMLError("XML contains multiple status tags") - error = int(xmlchild.content.strip()) - - # String error message (description:status as strerror:errno). - elif xmlchild.tag_name == "description": - if error_msg is not None: - raise DORadvertise_XMLError("XML contains multiple description tags") - error_msg = xmlchild.content - - # We found a tag. 
Let's try and get some values. - elif xmlchild.tag_name == "values" and xmlchild.children is not None: - if values is not None: - raise DORadvertise_XMLError("XML contains multiple values tags") - - values = [] - for valuenode in xmlchild.children: - if valuenode.tag_name != "value": - raise DORadvertise_XMLError( - "Child tag of ; expected: '', got: '<%s>'" % \ - valuenode.tag_name) - - content = valuenode.content - if content is None: - content = "" - - values.append(content) - - # Check for tags we do not expect. - elif xmlchild.tag_name not in ("status", "description", "values"): - raise DORadvertise_XMLError("Unexpected tag '" + \ - str(xmlchild.tag_name) + "' while parsing response.") - - if error is not 0: - raise DORadvertise_BadRequest(error, error_msg) - - # This happens when the server returns - if values is None: - return [] - - return values - diff --git a/DORadvertise_repy.py b/DORadvertise_repy.py deleted file mode 100644 index 3c21e17..0000000 --- a/DORadvertise_repy.py +++ /dev/null @@ -1,234 +0,0 @@ -### Automatically generated by repyhelper.py ### /home/integrationtester/cron_tests/dorputget/DORadvertise.repy - -### THIS FILE WILL BE OVERWRITTEN! -### DO NOT MAKE CHANGES HERE, INSTEAD EDIT THE ORIGINAL SOURCE FILE -### -### If changes to the src aren't propagating here, try manually deleting this file. -### Deleting this file forces regeneration of a repy translation - - -from repyportability import * -from repyportability import _context -import repyhelper -mycontext = repyhelper.get_shared_context() -callfunc = 'import' -callargs = [] - -""" -Author: Conrad Meyer - -Start Date: Wed Dec 9 2009 - -Description: -Advertisements to the Digital Object Registry run by CNRI. - -""" - - - - -repyhelper.translate_and_import('sockettimeout.repy') -repyhelper.translate_and_import('httpretrieve.repy') -repyhelper.translate_and_import('xmlparse.repy') - - - - -DORadvertise_FORM_LOCATION = "http://geni.doregistry.org/SeattleGENI/HashTable" - - - - -class DORadvertise_XMLError(Exception): - """ - Exception raised when the XML recieved from the Digital Object Registry - server does not match the structure we expect. - """ - pass - - - - -class DORadvertise_BadRequest(Exception): - """ - Exception raised when the Digital Object Registry interface indigates we - have made an invalid request. - """ - - - def __init__(self, errno, errstring): - self.errno = errno - self.errstring = errstring - Exception.__init__(self, "Bad DOR request (%s): '%s'" % (str(errno), errstring)) - - - - -def DORadvertise_announce(key, value, ttlval, timeout=None): - """ - - Announce a (key, value) pair to the Digital Object Registry. - - - key: - The new key the value should be stored under. - - value: - The value to associate with the given key. - - ttlval: - The length of time (in seconds) to persist this key <-> value - association in DHT. - - timeout: - The number of seconds to spend on this operation before failing - early. - - - xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. - DORadvertise_XMLError if the xml response structure does not correspond - to what we expect. - DORadvertise_BadRequest if the response indicates an error. - Any exception httpretrieve_get_string() throws (including timeout errors). - - - The key <-> value association gets stored in openDHT for a while. - - - None. 
- """ - - post_params = {'command': 'announce', 'key': key, 'value': value, - 'lifetime': str(int(ttlval))} - - _DORadvertise_command(post_params, timeout=timeout) - - return None - - - - - -def DORadvertise_lookup(key, maxvals=100, timeout=None): - """ - - Retrieve a stored value from the Digital Object Registry. - - - key: - The key the value is stored under. - - maxvals: - The maximum number of values stored under this key to - return to the caller. - - timeout: - The number of seconds to spend on this operation before failing - early. If not specified, the default is set to the default - timeout value for the http library (30 seconds). - - - xmlparse_XMLParseError if the xml returned isn't parseable by xmlparse. - DORadvertise_XMLError if the xml response structure does not correspond - to what we expect. - DORadvertise_BadRequest if the response indicates an error. - Any exception httpretrieve_get_string() throws (including timeout errors). - - - None. - - - The value stored in the Digital Object Registry at key. - """ - - post_params = {'command': 'lookup', 'key': key, 'maxvals': str(maxvals)} - - return _DORadvertise_command(post_params, timeout=timeout) - - - -def _DORadvertise_command(parameters, timeout=None): - # Internal helper function; calls the remote command, and returns - # the results we can glean from it. - - # If there is a timeout, use it! - if timeout != None: - post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ - postdata=parameters, timeout=timeout, \ - httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) - else: - post_result = httpretrieve_get_string(DORadvertise_FORM_LOCATION, \ - postdata=parameters, \ - httpheaders={"Content-Type": "application/x-www-form-urlencoded"}) - - - # Parse the result to check for success. Throw several exceptions to - # ensure the XML we're reading makes sense. - xmltree = xmlparse_parse(post_result) - - if xmltree.tag_name != "HashTableService": - raise DORadvertise_XMLError( - "Root node error. Expected: 'HashTableService', " + - "got: '%s'" % xmltree.tag_name) - - if xmltree.children is None: - raise DORadvertise_XMLError("Root node contains no children nodes.") - - # We expect to get an error code, an error string, and possibly some - # values from the server. - error_msg = None - error = None - values = None - - numxmlchildren = len(xmltree.children) - if numxmlchildren not in [2, 3]: - raise DORadvertise_XMLError("Root XML node contains inappropriate " + \ - "number of child nodes.") - - for xmlchild in xmltree.children: - # Read the numeric error code. - if xmlchild.tag_name == "status" and xmlchild.content is not None: - if error is not None: - raise DORadvertise_XMLError("XML contains multiple status tags") - error = int(xmlchild.content.strip()) - - # String error message (description:status as strerror:errno). - elif xmlchild.tag_name == "description": - if error_msg is not None: - raise DORadvertise_XMLError("XML contains multiple description tags") - error_msg = xmlchild.content - - # We found a tag. Let's try and get some values. 
-    elif xmlchild.tag_name == "values" and xmlchild.children is not None:
-      if values is not None:
-        raise DORadvertise_XMLError("XML contains multiple values tags")
-
-      values = []
-      for valuenode in xmlchild.children:
-        if valuenode.tag_name != "value":
-          raise DORadvertise_XMLError(
-              "Child tag of <values>; expected: '<value>', got: '<%s>'" % \
-              valuenode.tag_name)
-
-        content = valuenode.content
-        if content is None:
-          content = ""
-
-        values.append(content)
-
-    # Check for tags we do not expect.
-    elif xmlchild.tag_name not in ("status", "description", "values"):
-      raise DORadvertise_XMLError("Unexpected tag '" + \
-          str(xmlchild.tag_name) + "' while parsing response.")
-
-  # A non-zero status code means the server rejected the request.
-  if error != 0:
-    raise DORadvertise_BadRequest(error, error_msg)
-
-  # This happens when the server returns no <values> element at all.
-  if values is None:
-    return []
-
-  return values
-
-
-### Automatically generated by repyhelper.py ### /home/integrationtester/cron_tests/dorputget/DORadvertise.repy
diff --git a/DORadvertise_repy.pyc b/DORadvertise_repy.pyc
deleted file mode 100644
index 6280023401ad2a8691242f5f4d6fbfd0be6d3253..0000000000000000000000000000000000000000
GIT binary patch
[base85 payload omitted -- the patch simply deletes this compiled file]
diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy
index 41b4e74..0881b1e 100644
--- a/centralizedadvertise.repy
+++ b/centralizedadvertise.repy
@@ -16,16 +16,13 @@ servername = "satya.cs.washington.edu"
 serverport = 10101
 
 def centralizedadvertise_announce(key, value, ttlval):
-#added socket time out exception
+  sockobj = 
timeout_openconn(servername,serverport, timeout=10) try: session_sendmessage(sockobj, "PUT|"+str(key)+"|"+str(value)+"|"+str(ttlval)) response = session_recvmessage(sockobj) if response != 'OK': raise Exception, "Centralized announce failed '"+response+"'" - except socket.timeout: - print "Socket timed out '"+response+"'" - finally: # BUG: This raises an error right now if the call times out ( #260 ) # This isn't a big problem, but it is the "wrong" exception diff --git a/centralizedadvertise.repy~ b/centralizedadvertise.repy~ deleted file mode 100644 index 9db33f4..0000000 --- a/centralizedadvertise.repy~ +++ /dev/null @@ -1,55 +0,0 @@ -""" -Author: Justin Cappos - -Start Date: July 8, 2008 - -Description: -Advertisements to a central server (similar to openDHT) - - -""" - -include session.repy -# I'll use socket timeout to prevent hanging when it takes a long time... -include sockettimeout.repy -servername = "satya.cs.washington.edu" -serverport = 10101 - -def centralizedadvertise_announce(key, value, ttlval): -#added socket time out exception and commented previous code - sockobj = timeout_openconn(servername,serverport, timeout=10) - try: - session_sendmessage(sockobj, "PUT|"+str(key)+"|"+str(value)+"|"+str(ttlval)) - response = session_recvmessage(sockobj) - if response != 'OK': - raise Exception, "Centralized announce failed '"+response+"'" - except socket.timeout: - print "Socket timed out '"+response+"'" - - finally: - # BUG: This raises an error right now if the call times out ( #260 ) - # This isn't a big problem, but it is the "wrong" exception - sockobj.close() - - return True - - - - -def centralizedadvertise_lookup(key, maxvals=100): - sockobj = timeout_openconn(servername,serverport, timeout=10) - try: - session_sendmessage(sockobj, "GET|"+str(key)+"|"+str(maxvals)) - recvdata = session_recvmessage(sockobj) - # worked - if recvdata.endswith('OK'): - return recvdata[:-len('OK')].split(',') - raise Exception, "Centralized lookup failed" - - finally: - # BUG: This raises an error right now if the call times out ( #260 ) - # This isn't a big problem, but it is the "wrong" exception - sockobj.close() - - - diff --git a/dorputget_new.py b/dorputget_new.py deleted file mode 100644 index c110155..0000000 --- a/dorputget_new.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/python -""" - - dorputget.py - - - December 17, 2008 - - - ivan@cs.washington.edu - Ivan Beschastnikh - - - Attempt to put a (k,v) into DO registry and then get it back. On error - send an email to some folks. - - - Modify the following global var params to have this script functional: - - notify_list, a list of strings with emails denoting who will be - emailed when something goes wrong - - - GMAIL_USER and GMAIL_PWD environment variables: the username and - password of the gmail user who will be sending the email to the - emails in the notify_list (see crontab line below). - - This script takes no arguments. A typical use of this script is to - have it run periodically using something like the following crontab line: - 7 * * * * export GMAIL_USER='..' && export GMAIL_PWD='..' 
&& /usr/bin/python /home/seattle/dorputget.py > /home/seattle/cron_log.dorputget -""" - -import time -import os -import socket -import sys -import traceback -import threading -import random - -import send_gmail -import integrationtestlib -import repyhelper - -repyhelper.translate_and_import("/home/integrationtester/cron_tests/dorputget/DORadvertise.repy") - -# Armon: This is to replace using the time command with getruntime -import nonportable - -# event for communicating when the lookup is done or timedout -lookup_done_event = threading.Event() - - - -def lookup_timedout(): - """ - - Waits for lookup_done_event and notifies the folks on the - notify_list (global var) of the lookup timeout. - - - None. - - - None. - - - Sends an email to the notify_list folks - - - None. - """ - integrationtestlib.log("in lookup_timedout()") - notify_msg = "DOR lookup failed -- lookup_timedout() fired after 60 seconds." - subject = "DOR with repy test failed" - - # wait for the event to be set, timeout after 30 minutes - wait_time = 1800 - tstamp_before_wait = nonportable.getruntime() - lookup_done_event.wait(wait_time) - tstamp_after_wait = nonportable.getruntime() - - t_waited = tstamp_after_wait - tstamp_before_wait - if abs(wait_time - t_waited) < 5: - notify_msg += " And lookup stalled for over 30 minutes (max timeout value)." - else: - notify_msg += " And lookup stalled for " + str(t_waited) + " seconds" - - integrationtestlib.notify(notify_msg,subject ) - return - -def main(): - """ - - Program's main. - - - None. - - - All exceptions are caught. - - - None. - - - None. - """ - # setup the gmail user/password to use when sending email - success,explanation_str = send_gmail.init_gmail() - if not success: - integrationtestlib.log(explanation_str) - sys.exit(0) - - integrationtestlib.notify_list.append("cemeyer@u.washington.edu") - - key = str(random.randint(4,2**30)) - value = str(random.randint(4,2**30)) - ttlval = 60 - subject = "DOR with repy test failed" - - - # put(key,value) with ttlval into DOR - integrationtestlib.log("calling DORadvertise_announce(key: " + str(key) + ", val: " + str(value) + ", ttl: " + str(ttlval) + ")") - try: - DORadvertise_announce(key, value, ttlval) - except: - message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_announce(). " - message = message + "Anouncing with key: " + key + ", value: " + value + ", ttlval: " + str(ttlval) - integrationtestlib.handle_exception("DORadvertise_announce() failed", subject) - print message - sys.exit(0) - - # a 60 second timer to email the notify_list on slow lookups - lookup_timedout_timer = threading.Timer(60, lookup_timedout) - # start the lookup timer - lookup_timedout_timer.start() - - # get(key) from DOR - integrationtestlib.log("calling DORadvertise_lookup(key: " + str(key) + ")") - try: - ret_value = DORadvertise_lookup(key) - # TODO: check the return value as well - # ret_value = int(ret_value[0]) - except: - message = "DORadvertise_lookup() failed.\nFailed while doing DORadvertise_lookup(). " - message = message + "Looking up with key: " + key - integrationtestlib.handle_exception(message, subject) - sys.exit(0) - - lookup_timedout_timer.cancel() - lookup_done_event.set() - return - -if __name__ == "__main__": - main() - diff --git a/harshexit.py b/harshexit.py deleted file mode 100644 index 76bf867..0000000 --- a/harshexit.py +++ /dev/null @@ -1,174 +0,0 @@ - -# harshexit module -- Should be renamed, but I'm not sure what to. 
-# Provides these functions: -# portablekill: kill a function by pid -# harshexit: die, and do some things depending on the error code -# init_ostype: sets the module globals ostype and osrealtype - -# used to get information about the system we're running on -import platform -import os -import sys - -# needed for signal numbers -import signal - -# needed for changing polling constants on the Nokia N800 -import repy_constants - -# Needed for kill_process; This will fail on non-windows systems -try: - import windows_api -except: - windows_api = None - -# need for status retrieval -import statusstorage - -# This prevents writes to the nanny's status information after we want to stop -statuslock = statusstorage.statuslock - - - -ostype = None -osrealtype = None - - -# this indicates if we are exiting. Wrapping in a list to prevent needing a -# global (the purpose of this is described below) -statusexiting = [False] - - - -class UnsupportedSystemException(Exception): - pass - - - -def portablekill(pid): - global ostype - global osrealtype - - if ostype == None: - init_ostype() - - if ostype == 'Linux' or ostype == 'Darwin': - try: - os.kill(pid, signal.SIGTERM) - except: - pass - - try: - os.kill(pid, signal.SIGKILL) - except: - pass - - elif ostype == 'Windows' or ostype == 'WindowsCE': - # Use new api - windows_api.kill_process(pid) - - else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" - - - -# exit all threads -def harshexit(val): - global ostype - global osrealtype - - if ostype == None: - init_ostype() - - # The problem is that there can be multiple calls to harshexit before we - # stop. For example, a signal (like we may send to kill) may trigger a - # call. As a result, we block all other status writers the first time this - # is called, but don't later on... - if not statusexiting[0]: - - # do this once (now) - statusexiting[0] = True - - # prevent concurrent writes to status info (acquire the lock to stop others, - # but do not block... - statuslock.acquire() - - # we are stopped by the stop file watcher, not terminated through another - # mechanism - if val == 4: - # we were stopped by another thread. Let's exit - pass - - # Special Termination signal to notify the NM of excessive threads - elif val == 56: - statusstorage.write_status("ThreadErr") - - elif val == 44: - statusstorage.write_status("Stopped") - - else: - # generic error, normal exit, or exitall in the user code... - statusstorage.write_status("Terminated") - - # We intentionally do not release the lock. We don't want anyone else - # writing over our status information (we're killing them). - - - if ostype == 'Linux': - # The Nokia N800 refuses to exit on os._exit() by a thread. I'm going to - # signal our pid with SIGTERM (or SIGKILL if needed) - portablekill(os.getpid()) -# os._exit(val) - elif ostype == 'Darwin': - os._exit(val) - elif ostype == 'Windows' or ostype == 'WindowsCE': - # stderr is not automatically flushed in Windows... - sys.stderr.flush() - os._exit(val) - else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" - - - -# Figure out the OS type -def init_ostype(): - global ostype - global osrealtype - - # Detect whether or not it is Windows CE/Mobile - if os.name == 'ce': - ostype = 'WindowsCE' - return - - # figure out what sort of witch we are... 
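
(As a summary sketch of what the lines below derive -- assuming the usual
return values of the platform module:

    import platform
    print platform.system()   # e.g. 'Linux', 'Darwin', 'Windows', 'FreeBSD'

init_ostype() collapses the raw name: 'Microsoft' becomes 'Windows' (a
Vista quirk), 'FreeBSD' is treated as 'Linux', any 'CYGWIN*' system counts
as 'Windows', and anything unrecognized is left as 'Unknown'.)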
-  osrealtype = platform.system()
-
-  # The Nokia N800 (and N900) uses the ARM architecture,
-  # and we change the constants on it to make disk checks happen less often
-  if platform.machine().startswith('armv'):
-    if osrealtype == 'Linux' or osrealtype == 'Darwin' or osrealtype == 'FreeBSD':
-      repy_constants.CPU_POLLING_FREQ_LINUX = repy_constants.CPU_POLLING_FREQ_WINCE;
-      repy_constants.RESOURCE_POLLING_FREQ_LINUX = repy_constants.RESOURCE_POLLING_FREQ_WINCE;
-
-  if osrealtype == 'Linux' or osrealtype == 'Windows' or osrealtype == 'Darwin':
-    ostype = osrealtype
-    return
-
-  # workaround for a Vista bug...
-  if osrealtype == 'Microsoft':
-    ostype = 'Windows'
-    return
-
-  if osrealtype == 'FreeBSD':
-    ostype = 'Linux'
-    return
-
-  if osrealtype.startswith('CYGWIN'):
-    # I do this because ps doesn't do memory info... They'll need to add
-    # pywin to their copy of cygwin... I wonder if I should detect its
-    # absence and tell them (but continue)?
-    ostype = 'Windows'
-    return
-
-  ostype = 'Unknown'
-
diff --git a/harshexit.pyc b/harshexit.pyc
deleted file mode 100644
index 09de775c10e2605810893a9762a8ab7f3701a7ee..0000000000000000000000000000000000000000
GIT binary patch
[base85 payload omitted -- the patch simply deletes this compiled file]
diff --git a/integrationtestlib.pyc b/integrationtestlib.pyc
deleted file mode 100644
index 9d48d7602f2af8e5ba7044c1a653cf57c2b11eaf..0000000000000000000000000000000000000000
GIT binary patch
[base85 payload omitted -- the patch simply deletes this compiled file]
diff --git a/nonportable.py b/nonportable.py
deleted file mode 100755
index 2c0a9d3..0000000
--- a/nonportable.py
+++ /dev/null
@@ -1,1028 +0,0 @@
-"""
-Author: Justin Cappos
-
-Start Date: July 1st, 2008
-
-Description:
-Handles exiting and killing all threads, tracking CPU / Mem usage, etc.
-
-"""
-
-
-import threading
-import os
-import time
-
-# needed for sys.stderr and windows Popen hackery
-import sys
-
-# needed for signal numbers
-import signal
-
-# needed for harshexit
-import harshexit
-
-# print useful info when exiting...
-import tracebackrepy
-
-# used to query status, etc.
-# This may fail on Windows CE
-try:
-  import subprocess
-  mobile_no_subprocess = False
-except ImportError:
-  # Set flag to avoid using subprocess
-  mobile_no_subprocess = True
-
-
-# used for socket.error
-import socket
-
-# needed for status retrieval
-import statusstorage
-
-# Get constants
-import repy_constants
-
-# Get access to the status interface so we can start it
-import nmstatusinterface
-
-# This gives us our restrictions information
-import nanny_resource_limits
-
-# This is used for IPC
-import marshal
-
-# This will fail on non-windows systems
-try:
-  import windows_api as windows_api
-except:
-  windows_api = None
-
-# Armon: This is a place holder for the module that will be imported later
-os_api = None
-
-# Armon: See additional imports at the bottom of the file
-
-class UnsupportedSystemException(Exception):
-  pass
-
-
-################### Publicly visible functions #######################
-
-# check the disk space used by a dir.
-def compute_disk_use(dirname):
-  # Convert path to absolute
-  dirname = os.path.abspath(dirname)
-
-  diskused = 0
-
-  for filename in os.listdir(dirname):
-    try:
-      diskused = diskused + os.path.getsize(os.path.join(dirname, filename))
-    except IOError: # They likely deleted the file in the meantime...
-      pass
-    except OSError: # They likely deleted the file in the meantime...
-      pass
-
-    # charge an extra 4K for each file to prevent lots of little files from
-    # using up the disk. 
I'm doing this outside of the except clause in - # the failure to get the size wasn't related to deletion - diskused = diskused + 4096 - - return diskused - - -# prepare a socket so it behaves how we want -def preparesocket(socketobject): - - if ostype == 'Windows': - # we need to set a timeout because on rare occasions Windows will block - # on recvmess with a bad socket. This prevents it from locking the system. - # We use select, so the timeout should never be actually used. - - # The actual value doesn't seem to matter, so I'll use 100 years - socketobject.settimeout(60*60*24*365*100) - - elif ostype == 'Linux' or ostype == 'Darwin': - # Linux seems not to care if we set the timeout, Mac goes nuts and refuses - # to let you send from a socket you're receiving on (why?) - pass - - elif ostype == "WindowsCE": - # No known issues, so just go - pass - - else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" - - -# Armon: Also launches the nmstatusinterface thread. -# This will result in an internal thread on Windows -# and a thread on the external process for *NIX -def monitor_cpu_disk_and_mem(): - if ostype == 'Linux' or ostype == 'Darwin': - # Startup a CPU monitoring thread/process - do_forked_resource_monitor() - - elif ostype == 'Windows' or ostype == 'WindowsCE': - # Now we set up a cpu nanny... - # Use an external CPU monitor for WinCE - if ostype == 'WindowsCE': - nannypath = "\"" + repy_constants.PATH_SEATTLE_INSTALL + 'win_cpu_nanny.py' + "\"" - cmdline = str(os.getpid())+" "+str(nanny_resource_limits.resource_limit("cpu"))+" "+str(repy_constants.CPU_POLLING_FREQ_WINCE) - windows_api.launch_python_script(nannypath, cmdline) - else: - WinCPUNannyThread().start() - - # Launch mem./disk resource nanny - WindowsNannyThread().start() - - # Start the nmstatusinterface. Windows means repy isn't run in an external - # process, so pass None instead of a process id. - nmstatusinterface.launch(None) - else: - raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")" - - - - -# Elapsed time -elapsedtime = 0 - -# Store the uptime of the system when we first get loaded -starttime = 0 -last_uptime = 0 - -# Timestamp from our starting point -last_timestamp = time.time() - -# This is our uptime granularity -granularity = 1 - -# This ensures only one thread calling getruntime at any given time -runtimelock = threading.Lock() - -def getruntime(): - """ - - Return the amount of time the program has been running. This is in - wall clock time. This function is not guaranteed to always return - increasing values due to NTP, etc. - - - None - - - None. - - - None - - - By default this will have the same granularity as the system clock. However, if time - goes backward due to NTP or other issues, getruntime falls back to system uptime. - This has much lower granularity, and varies by each system. - - - The elapsed time as float - """ - global starttime, last_uptime, last_timestamp, elapsedtime, granularity, runtimelock - - # Get the lock - runtimelock.acquire() - - # Check if Linux or BSD/Mac - if ostype in ["Linux", "Darwin"]: - uptime = os_api.get_system_uptime() - - # Check if time is going backward - if uptime < last_uptime: - # If the difference is less than 1 second, that is okay, since - # The boot time is only precise to 1 second - if (last_uptime - uptime) > 1: - raise EnvironmentError, "Uptime is going backwards!" 
- else: - # Use the last uptime - uptime = last_uptime - - # No change in uptime - diff_uptime = 0 - else: - # Current uptime, minus the last uptime - diff_uptime = uptime - last_uptime - - # Update last uptime - last_uptime = uptime - - # Check for windows - elif ostype in ["Windows", "WindowsCE"]: - # Release the lock - runtimelock.release() - - # Time.clock returns elapsedtime since the first call to it, so this works for us - return time.clock() - - # Who knows... - else: - raise EnvironmentError, "Unsupported Platform!" - - # Current uptime minus start time - runtime = uptime - starttime - - # Get runtime from time.time - current_time = time.time() - - # Current time, minus the last time - diff_time = current_time - last_timestamp - - # Update the last_timestamp - last_timestamp = current_time - - # Is time going backward? - if diff_time < 0.0: - # Add in the change in uptime - elapsedtime += diff_uptime - - # Lets check if time.time is too skewed - else: - skew = abs(elapsedtime + diff_time - runtime) - - # If the skew is too great, use uptime instead of time.time() - if skew < granularity: - elapsedtime += diff_time - else: - elapsedtime += diff_uptime - - # Release the lock - runtimelock.release() - - # Return the new elapsedtime - return elapsedtime - - -# This lock is used to serialize calls to get_resouces -get_resources_lock = threading.Lock() - -# These are the resources we expose in get_resources -exposed_resources = set(["cpu","memory","diskused","events", - "filewrite","fileread","filesopened", - "insockets","outsockets","netsend", - "netrecv","loopsend","looprecv", - "lograte","random","messport","connport"]) - -# These are the resources that we don't flatten using -# len() for the usage. For example, instead of given the -# set of thread's, we flatten this into N number of threads. -flatten_exempt_resources = set(["connport","messport"]) - -# Cache the disk used from the external process -cached_disk_used = 0L - -# This array holds the times that repy was stopped. -# It is an array of tuples, of the form (time, amount) -# where time is when repy was stopped (from getruntime()) and amount -# is the stop time in seconds. The last process_stopped_max_entries are retained -process_stopped_timeline = [] -process_stopped_max_entries = 100 - -# Method to expose resource limits and usage -def get_resources(): - """ - - Returns the resouce utilization limits as well - as the current resource utilization. - - - None. - - - A tuple of dictionaries and an array (limits, usage, stoptimes). - - Limits is the dictionary which maps the resouce name - to its maximum limit. - - Usage is the dictionary which maps the resource name - to its current usage. - - Stoptimes is an array of tuples with the times which the Repy proces - was stopped and for how long, due to CPU over-use. - Each entry in the array is a tuple (TOS, Sleep Time) where TOS is the - time of stop (respective to getruntime()) and Sleep Time is how long the - repy process was suspended. - - The stop times array holds a fixed number of the last stop times. - Currently, it holds the last 100 stop times. 
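
(A hypothetical caller, not part of this module, would unpack the result
like so:

    limits, usage, stoptimes = get_resources()
    print "cpu limit:", limits["cpu"], "cpu used:", usage["cpu"]
    print "disk used:", usage["diskused"], "of", limits["diskused"]
    for time_of_stop, sleep_time in stoptimes:
      print "stopped at", time_of_stop, "for", sleep_time, "seconds"

All keys shown are among the exposed_resources listed above.)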
- """ - # Acquire the lock - get_resources_lock.acquire() - - # Construct the dictionaries as copies from nanny - limits = nanny_resource_limits.resource_restriction_table.copy() - usage = nanny_resource_limits.resource_consumption_table.copy() - - # These are the type we need to copy or flatten - check_types = set([list,dict,set]) - - # Check the limits dictionary for bad keys - for resource in limits.keys(): - # Remove any resources we should not expose - if resource not in exposed_resources: - del limits[resource] - - # Check the type - if type(limits[resource]) in check_types: - # Copy the data structure - limits[resource] = limits[resource].copy() - - # Check the usage dictionary - for resource in usage.keys(): - # Remove any resources that are not exposed - if resource not in exposed_resources: - del usage[resource] - - # Check the type, copy any data structures - # Flatten any structures using len() other than - # "connport" and "messport" - if type(usage[resource]) in check_types: - # Check if they are exempt from flattening, store a shallow copy - if resource in flatten_exempt_resources: - usage[resource] = usage[resource].copy() - - # Store the size of the data set - else: - usage[resource] = len(usage[resource]) - - - - # Calculate all the usage's - pid = os.getpid() - - # Get CPU and memory, this is thread specific - if ostype in ["Linux", "Darwin"]: - - # Get CPU first, then memory - usage["cpu"] = os_api.get_process_cpu_time(pid) - - # This uses the cached PID data from the CPU check - usage["memory"] = os_api.get_process_rss() - - # Get the thread specific CPU usage - usage["threadcpu"] = os_api.get_current_thread_cpu_time() - - - # Windows Specific versions - elif ostype in ["Windows","WindowsCE"]: - - # Get the CPU time - usage["cpu"] = windows_api.get_process_cpu_time(pid) - - # Get the memory, use the resident set size - usage["memory"] = windows_api.process_memory_info(pid)['WorkingSetSize'] - - # Get thread-level CPU - usage["threadcpu"] = windows_api.get_current_thread_cpu_time() - - # Unknown OS - else: - raise EnvironmentError("Unsupported Platform!") - - # Use the cached disk used amount - usage["diskused"] = cached_disk_used - - # Release the lock - get_resources_lock.release() - - # Copy the stop times - stoptimes = process_stopped_timeline[:] - - # Return the dictionaries and the stoptimes - return (limits,usage,stoptimes) - - -################### Windows specific functions ####################### - -class WindowsNannyThread(threading.Thread): - - def __init__(self): - threading.Thread.__init__(self,name="NannyThread") - - def run(self): - # Calculate how often disk should be checked - if ostype == "WindowsCE": - disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WINCE / repy_constants.CPU_POLLING_FREQ_WINCE) - else: - disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WIN / repy_constants.CPU_POLLING_FREQ_WIN) - current_interval = 0 # What cycle are we on - - # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem - windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL) - - # need my pid to get a process handle... - mypid = os.getpid() - - # run forever (only exit if an error occurs) - while True: - try: - # Check memory use, get the WorkingSetSize or RSS - memused = windows_api.process_memory_info(mypid)['WorkingSetSize'] - - if memused > nanny_resource_limits.resource_limit("memory"): - # We will be killed by the other thread... 
- raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny_resource_limits.resource_limit("memory"))+"'" - - # Increment the interval we are on - current_interval += 1 - - # Check if we should check the disk - if (current_interval % disk_interval) == 0: - # Check diskused - diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR) - if diskused > nanny_resource_limits.resource_limit("diskused"): - raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny_resource_limits.resource_limit("diskused"))+"'" - - if ostype == 'WindowsCE': - time.sleep(repy_constants.CPU_POLLING_FREQ_WINCE) - else: - time.sleep(repy_constants.CPU_POLLING_FREQ_WIN) - - except windows_api.DeadProcess: - # Process may be dead, or die while checking memory use - # In any case, there is no reason to continue running, just exit - harshexit.harshexit(99) - - except: - tracebackrepy.handle_exception() - print >> sys.stderr, "Nanny died! Trying to kill everything else" - harshexit.harshexit(20) - - -# Windows specific CPU Nanny Stuff -winlastcpuinfo = [0,0] - -# Enfoces CPU limit on Windows and Windows CE -def win_check_cpu_use(cpulim, pid): - global winlastcpuinfo - - # get use information and time... - now = getruntime() - - # Get the total cpu time - usertime = windows_api.get_process_cpu_time(pid) - - useinfo = [usertime, now] - - # get the previous time and cpu so we can compute the percentage - oldusertime = winlastcpuinfo[0] - oldnow = winlastcpuinfo[1] - - if winlastcpuinfo == [0,0]: - winlastcpuinfo = useinfo - # give them a free pass if it's their first time... - return 0 - - # save this data for next time... - winlastcpuinfo = useinfo - - # Get the elapsed time... - elapsedtime = now - oldnow - - # This is a problem - if elapsedtime == 0: - return -1 # Error condition - - # percent used is the amount of change divided by the time... 
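
(Worked numbers for the ratio computed just below, invented for
illustration: if the process shows 0.80s of accumulated CPU now versus
0.50s at the previous check, over a 2.0 second wall-clock window, then

    percentused = (0.80 - 0.50) / 2.0   # = 0.15, i.e. 15% of one core

and calculate_cpu_sleep_interval() compares that against the configured
limit to decide how long the process should be paused.)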
- percentused = (usertime - oldusertime) / elapsedtime - - # Calculate amount of time to sleep for - stoptime = nanny_resource_limits.calculate_cpu_sleep_interval(cpulim, percentused,elapsedtime) - - if stoptime > 0.0: - # Try to timeout the process - if windows_api.timeout_process(pid, stoptime): - # Log the stoptime - process_stopped_timeline.append((now, stoptime)) - - # Drop the first element if the length is greater than the maximum entries - if len(process_stopped_timeline) > process_stopped_max_entries: - process_stopped_timeline.pop(0) - - # Return how long we slept so parent knows whether it should sleep - return stoptime - - else: - # Process must have been making system call, try again next time - return -1 - - # If the stop time is 0, then avoid calling timeout_process - else: - return 0.0 - - -# Dedicated Thread for monitoring CPU, this is run as a part of repy -class WinCPUNannyThread(threading.Thread): - # Thread variables - pid = 0 # Process pid - - def __init__(self): - self.pid = os.getpid() - threading.Thread.__init__(self,name="CPUNannyThread") - - def run(self): - # Elevate our priority, set us to the highest so that we can more effectively throttle - success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST) - - # If we failed to get HIGHEST priority, try above normal, else we're still at default - if not success: - windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL) - - # Run while the process is running - while True: - try: - # Get the frequency - frequency = repy_constants.CPU_POLLING_FREQ_WIN - - # Base amount of sleeping on return value of - # win_check_cpu_use to prevent under/over sleeping - slept = win_check_cpu_use(nanny_resource_limits.resource_limit("cpu"), self.pid) - - if slept == -1: - # Something went wrong, try again - pass - elif (slept < frequency): - time.sleep(frequency-slept) - - except windows_api.DeadProcess: - # Process may be dead - harshexit.harshexit(97) - - except: - tracebackrepy.handle_exception() - print >> sys.stderr, "CPU Nanny died! Trying to kill everything else" - harshexit.harshexit(25) - - - - - - - -############## *nix specific functions (may include Mac) ############### - -# This method handles messages on the "diskused" channel from -# the external process. When the external process measures disk used, -# it is piped in and cached for calls to getresources. -def IPC_handle_diskused(bytes): - cached_disk_used = bytes - - -# This method handles meessages on the "repystopped" channel from -# the external process. When the external process stops repy, it sends -# a tuple with (TOS, amount) where TOS is time of stop (getruntime()) and -# amount is the amount of time execution was suspended. -def IPC_handle_stoptime(info): - # Push this onto the timeline - process_stopped_timeline.append(info) - - # Drop the first element if the length is greater than the max - if len(process_stopped_timeline) > process_stopped_max_entries: - process_stopped_timeline.pop(0) - - -# Use a special class of exception for when -# resource limits are exceeded -class ResourceException(Exception): - pass - - -# Armon: Method to write a message to the pipe, used for IPC. -# This allows the pipe to be multiplexed by sending simple dictionaries -def write_message_to_pipe(writehandle, channel, data): - """ - - Writes a message to the pipe - - - writehandle: - A handle to a pipe which can be written to. - - channel: - The channel used to describe the data. Used for multiplexing. 
- - data: - The data to send. - - - As with os.write() - EnvironmentError will be thrown if os.write() sends 0 bytes, indicating the - pipe is broken. - """ - # Construct the dictionary - mesg_dict = {"ch":channel,"d":data} - - # Convert to a string - mesg_dict_str = marshal.dumps(mesg_dict) - - # Make a full string - mesg = str(len(mesg_dict_str)) + ":" + mesg_dict_str - - # Send this - index = 0 - while index < len(mesg): - bytes = os.write(writehandle, mesg[index:]) - if bytes == 0: - raise EnvironmentError, "Write send 0 bytes! Pipe broken!" - index += bytes - - -# Armon: Method to read a message from the pipe, used for IPC. -# This allows the pipe to be multiplexed by sending simple dictionaries -def read_message_from_pipe(readhandle): - """ - - Reads a message from a pipe. - - - readhandle: - A handle to a pipe which can be read from - - - As with os.read(). - EnvironmentError will be thrown if os.read() returns a 0-length string, indicating - the pipe is broken. - - - A tuple (Channel, Data) where Channel is used to multiplex the pipe. - """ - # Read until we get to a colon - data = "" - index = 0 - - # Loop until we get a message - while True: - - # Read in data if the buffer is empty - if index >= len(data): - # Read 8 bytes at a time - mesg = os.read(readhandle,8) - if len(mesg) == 0: - raise EnvironmentError, "Read returned emtpy string! Pipe broken!" - data += mesg - - # Increment the index while there is data and we have not found a colon - while index < len(data) and data[index] != ":": - index += 1 - - # Check if we've found a colon - if len(data) > index and data[index] == ":": - # Get the message length - mesg_length = int(data[:index]) - - # Determine how much more data we need - more_data = mesg_length - len(data) + index + 1 - - # Read in the rest of the message - while more_data > 0: - mesg = os.read(readhandle, more_data) - if len(mesg) == 0: - raise EnvironmentError, "Read returned emtpy string! Pipe broken!" - data += mesg - more_data -= len(mesg) - - # Done, convert the message to a dict - whole_mesg = data[index+1:] - mesg_dict = marshal.loads(whole_mesg) - - # Return a tuple (Channel, Data) - return (mesg_dict["ch"],mesg_dict["d"]) - - - -# This dictionary defines the functions that handle messages -# on each channel. E.g. when a message arrives on the "repystopped" channel, -# the IPC_handle_stoptime function should be invoked to handle it. -IPC_HANDLER_FUNCTIONS = {"repystopped":IPC_handle_stoptime, - "diskused":IPC_handle_diskused } - - -# This thread checks that the parent process is alive and invokes -# delegate methods when messages arrive on the pipe. -class parent_process_checker(threading.Thread): - def __init__(self, readhandle): - """ - - Terminates harshly if our parent dies before we do. - - - readhandle: A file descriptor to the handle of a pipe to our parent. 
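
(Both pipe helpers above share one wire format: a decimal byte count, a
colon, then a marshalled {"ch": channel, "d": data} dictionary. A
self-contained sketch of one round trip, with hypothetical values:

    import marshal, os

    payload = marshal.dumps({"ch": "diskused", "d": 4096})
    framed = str(len(payload)) + ":" + payload

    readfd, writefd = os.pipe()
    os.write(writefd, framed)
    # read_message_from_pipe(readfd) now returns ("diskused", 4096).)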
- """ - # Name our self - threading.Thread.__init__(self, name="ParentProcessChecker") - - # Store the handle - self.readhandle = readhandle - - def run(self): - # Run forever - while True: - # Read a message - try: - mesg = read_message_from_pipe(self.readhandle) - except Exception, e: - break - - # Check for a handler function - if mesg[0] in IPC_HANDLER_FUNCTIONS: - # Invoke the handler function with the data - handler = IPC_HANDLER_FUNCTIONS[mesg[0]] - handler(mesg[1]) - - # Print a message if there is a message on an unknown channel - else: - print "[WARN] Message on unknown channel from parent process:", mesg[0] - - - ### We only leave the loop on a fatal error, so we need to exit now - - # Write out status information, our parent would do this, but its dead. - statusstorage.write_status("Terminated") - print >> sys.stderr, "Monitor process died! Terminating!" - harshexit.harshexit(70) - - - -# For *NIX systems, there is an external process, and the -# pid for the actual repy process is stored here -repy_process_id = None - -# Forks Repy. The child will continue execution, and the parent -# will become a resource monitor -def do_forked_resource_monitor(): - global repy_process_id - - # Get a pipe - (readhandle, writehandle) = os.pipe() - - # I'll fork a copy of myself - childpid = os.fork() - - if childpid == 0: - # We are the child, close the write end of the pipe - os.close(writehandle) - - # Start a thread to check on the survival of the parent - parent_process_checker(readhandle).start() - - return - else: - # We are the parent, close the read end - os.close(readhandle) - - # Store the childpid - repy_process_id = childpid - - # Start the nmstatusinterface - nmstatusinterface.launch(repy_process_id) - - # Small internal error handler function - def _internal_error(message): - try: - print >> sys.stderr, message - sys.stderr.flush() - except: - pass - - # Stop the nmstatusinterface, we don't want any more status updates - nmstatusinterface.stop() - - # Kill repy - harshexit.portablekill(childpid) - - try: - # Write out status information, repy was Stopped - statusstorage.write_status("Terminated") - except: - pass - - try: - # Some OS's require that you wait on the pid at least once - # before they do any accounting - (pid, status) = os.waitpid(childpid,os.WNOHANG) - - # Launch the resource monitor, if it fails determine why and restart if necessary - resource_monitor(childpid, writehandle) - - except ResourceException, exp: - # Repy exceeded its resource limit, kill it - _internal_error(str(exp)+" Impolitely killing child!") - harshexit.harshexit(98) - - except Exception, exp: - # There is some general error... - try: - (pid, status) = os.waitpid(childpid,os.WNOHANG) - except: - # This means that the process is dead - pass - - # Check if this is repy exiting - if os.WIFEXITED(status) or os.WIFSIGNALED(status): - sys.exit(0) - - else: - _internal_error(str(exp)+" Monitor death! Impolitely killing child!") - raise - -def resource_monitor(childpid, pipe_handle): - """ - - Function runs in a loop forever, checking resource usage and throttling CPU. - Checks CPU, memory, and disk. - - - childpid: - The child pid, e.g. the pid of repy - - pipe_handle: - A handle to the pipe to the repy process. Allows sending resource use information. 
- """ - # Get our pid - ourpid = os.getpid() - - # Calculate how often disk should be checked - disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX) - current_interval = -1 # What cycle are we on - - # Store time of the last interval - last_time = getruntime() - last_CPU_time = 0 - resume_time = 0 - - # Run forever... - while True: - ########### Check CPU ########### - # Get elasped time - currenttime = getruntime() - elapsedtime1 = currenttime - last_time # Calculate against last run - elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy - elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval - last_time = currenttime # Save the current time - - # Safety check, prevent ZeroDivisionError - if elapsedtime == 0.0: - continue - - # Get the total cpu at this point - totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage - totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage - - # Calculate percentage of CPU used - percentused = (totalCPU - last_CPU_time) / elapsedtime - - # Do not throttle for the first interval, wrap around - # Store the totalCPU for the next cycle - if last_CPU_time == 0: - last_CPU_time = totalCPU - continue - else: - last_CPU_time = totalCPU - - # Calculate stop time - stoptime = nanny_resource_limits.calculate_cpu_sleep_interval(nanny_resource_limits.resource_limit("cpu"), percentused, elapsedtime) - - # If we are supposed to stop repy, then suspend, sleep and resume - if stoptime > 0.0: - # They must be punished by stopping - os.kill(childpid, signal.SIGSTOP) - - # Sleep until time to resume - time.sleep(stoptime) - - # And now they can start back up! - os.kill(childpid, signal.SIGCONT) - - # Save the resume time - resume_time = getruntime() - - # Send this information as a tuple containing the time repy was stopped and - # for how long it was stopped - write_message_to_pipe(pipe_handle, "repystopped", (currenttime, stoptime)) - - - ########### End Check CPU ########### - # - ########### Check Memory ########### - - # Get how much memory repy is using - memused = os_api.get_process_rss() - - # Check if it is using too much memory - if memused > nanny_resource_limits.resource_limit("memory"): - raise ResourceException, "Memory use '"+str(memused)+"' over limit '"+str(nanny_resource_limits.resource_limit("memory"))+"'." - - ########### End Check Memory ########### - # - ########### Check Disk Usage ########### - # Increment our current cycle - current_interval += 1; - - # Check if it is time to check the disk usage - if (current_interval % disk_interval) == 0: - # Reset the interval - current_interval = 0 - - # Calculate disk used - diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR) - - # Raise exception if we are over limit - if diskused > nanny_resource_limits.resource_limit("diskused"): - raise ResourceException, "Disk use '"+str(diskused)+"' over limit '"+str(nanny_resource_limits.resource_limit("diskused"))+"'." 
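
(On cadence -- illustrative numbers only; the real values live in
repy_constants: with RESOURCE_POLLING_FREQ_LINUX = 1.0 and
CPU_POLLING_FREQ_LINUX = 0.1,

    disk_interval = int(1.0 / 0.1)   # = 10

so CPU and memory are checked on every 0.1s pass of this loop, while the
costlier disk walk runs only on every 10th pass.)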
- - # Send the disk usage information, raw bytes used - write_message_to_pipe(pipe_handle, "diskused", diskused) - - ########### End Check Disk ########### - - # Sleep before the next iteration - time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX) - - -########### functions that help me figure out the os type ########### - -# Calculates the system granularity -def calculate_granularity(): - global granularity - - if ostype in ["Windows", "WindowsCE"]: - # The Granularity of getTickCount is 1 millisecond - granularity = pow(10,-3) - - elif ostype == "Linux": - # We don't know if the granularity is correct yet - correct_granularity = False - - # How many times have we tested - tests = 0 - - # Loop while the granularity is incorrect, up to 10 times - while not correct_granularity and tests <= 10: - current_granularity = os_api.get_uptime_granularity() - uptime_pre = os_api.get_system_uptime() - time.sleep(current_granularity / 10) - uptime_post = os_api.get_system_uptime() - - diff = uptime_post - uptime_pre - - correct_granularity = int(diff / current_granularity) == (diff / current_granularity) - tests += 1 - - granularity = current_granularity - - elif ostype == "Darwin": - granularity = os_api.get_uptime_granularity() - - - -# Call init_ostype!!! -harshexit.init_ostype() - -ostype = harshexit.ostype -osrealtype = harshexit.osrealtype - -# Import the proper system wide API -if osrealtype == "Linux": - import linux_api as os_api -elif osrealtype == "Darwin": - import darwin_api as os_api -elif osrealtype == "FreeBSD": - import freebsd_api as os_api -elif ostype == "Windows" or ostype == "WindowsCE": - # There is no real reason to do this, since windows is imported separately - import windows_api as os_api -else: - # This is a non-supported OS - raise UnsupportedSystemException, "The current Operating System is not supported! Fatal Error." - -# Set granularity -calculate_granularity() - -# For Windows, we need to initialize time.clock() -if ostype in ["Windows", "WindowsCE"]: - time.clock() - -# Initialize getruntime for other platforms -else: - # Set the starttime to the initial uptime - starttime = getruntime() - last_uptime = starttime - - # Reset elapsed time - elapsedtime = 0 - - -# Conrad: initialize nanny (Prevents circular imports) -# Note: nanny_resource_limits can be initialized at any time after getruntime() -# is defined, this just seems the most appropriate place to put the call. -nanny_resource_limits.init(getruntime) diff --git a/nonportable.pyc b/nonportable.pyc deleted file mode 100644 index f93e0ca4ef34b18f154aebee5aa93fe09a71006b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18576 zcmc&+Yiu0Xb-uH+TyocZi4{yN>r=}>%q9aqvkP2-!?q;|%B!}Fc zSrN?e~Kh6&<08C07-);Kp#lcq(xDnMN^=Ef&%%`0BwN$DvGv1(I0J* zw%>Q|%UO%S$lQg?{m&~&bedrU;9drO+B(yRq3B0{QE3^@uy9tT%}eJ8p_S7 znxR$@(ea#G$z^dJ)+zPl-sLZtPxAYiZENu ze@MB*$~~%F%nbX6nP3hW9pfHT>qpdG9L=NB9CLV3t>+}?G36dtE2q>t=3G|pJJiZ) z<(`mn->F8#(tzN@AFY1}0 z>Qic~prSRCw_`?fgAwJOWXw6PlH>F@;s@5O(`bZoskE3lQDR+j5|2f-J?mVYOj+m8o_)Ghy5hyv$ZsTm7+fq}a{{;K z#g@10qy4&tq_ySOY7(#2tYjnd95*biv9sao8(Eut7U^Q8+aOJ$&M~xJ4klGTDG0mUBpV8jAl)tLS0xR`hn{fy ztiD}V%SbJjIlToRf%4TL_Jg~YaQb1KI_L9V}dtoz? 
z%P)Ch41%w*N|{|Iy$Is4y5%L|>ZVss?!)o+VTeZLHJr%PBk`zWtORGmvr2GRyu;Gi}O>or2Xo7zR z!5(;<+ztyj7If5T{&Y9X9!9J3Q3N&vVeoEPSqq~r&#mytno-ruSS=Q&6wY$5QVoMR zae^dH*zD%a(zVLs+|1I_^|{LY!s61*_3JX9IEie|OYSDvQNwqW5u|16?e1%S-A7lt z_jE|fOUMHtlPWiFT)#fQ@Lc7pJ@luoWc2iqr6AA~mNOR5vQvlZ`M) z1w+D(1mU5Z1@73A2tA=uEFns8AT7e|tiWbL9aWNM4D$7&HK*#yLi8xYRu{s+6Y{b- zQ!U*{=(4~TQ?Axs$lUwEJjr-6!N)rx3>0F%Qupe(<0%t0#&abQTCXu;Tr`GreP&;N zFsCI8uT>R45rjOdS^VM&WP|j;Lhpjg$P!C}Xli7Pn|bDRslm@f1IV5U#YxZQf(9w9pLUG~<)-Z*4Q zjYIlEEP+rt5r}0|;W~`p$us7FG@+anWnzCr%7iq%;Pk5Cf=Yx^Q9K3-Jr4T5gK2{~ z^h;&KVCB57T{}Sh|^EJj~$I zOtjvtdm!sQ1!PD!9C7x!#)7tR>hI zEMW(T6dUr2-k}&M!@?c~&6*SW$)1Is;%24-Qnq8-x)$zuuXxdv<)@pQE_6M_+ExK( zcbv%WSfmw3R)}sQ>{*QUPg&qb*n2scHSDu)#Vtt(Y68;Mz3H@HIU62Yy;sK&g4!>u44r=L+(jC&*qO}K0SdmwA8o*cja-i$ATIeJgug(5_Qm1CEaBIi zE19J{L}RtQayPjP`7@Gf9J@DABnn;{6?vpH>B!qNXUD$3z@9 zya1{JtNMY)b+Ja6%_K{5=K`o8Uf{|lK|J!RuZTWT3q#y}6!f4H6H>w!H8Mzy%X0x2 zaKpOP!Li2F=Oh92Ak@GH7KJ zY7yinz%GEP7Q3qkVAAFgr5_goIBcp*%o=7@rt>;|G)s4>OTjJaLC@uD29VvGdO+4_ z+9?G1MAi|q_jS~(5?E0LV84dr7?%-C7!e0B!4VB0(-RcdG8u^MH4}oEx&@3gB8d0P z;gsEql>V+S;#x}w+F+c(a-zuDv&woNQ<=3TPC{Yy@o6nKtm}e|$@eJ(TOI9LI~zVk zOdS$r-x{f-%W5FV0I5-7*RMD0mW-43d5e~vx7j7Yzjd(BY=^``l=iupO`QFsT z<-FJYUF<^8tgixXa4%7rQm&(gUj3|=Nwq!Tp#-yF^H$|^XU`sJ&;}SH3+!2tk+5=Y z4}>1k#uFl1m!S>z%E z^d?3Y>RxL=SBV!?yj>5ywY_D&EY)%9g}|vjk-?|zV~dGrUy-hW9w zz#D;SKq(W{`T=DG)&QkotSwj|{!E|=20D_$4>A2A)magB#lQ!{0&pCl8!*ZROa(C@ zl~K%#R^lfOFeOeHSg6Bd02HMW3x`7;U@6!PFkBa_9T-lQ)G^%llLnDvYtI%9aD~kg z8AjG%1cPK0X%>u>uwyT8e^?v`fK+syejGQ+0B+g@bb3H4pqyFb9iVCRn99S|n45n- ztqBt)x2*rb4npIDU?A4IMIwjV}Q=+K~ z`xn#o71;503%`mM@e~5N+<-8iJ4s_lv`0M_LX{3Fi^@EVcFgi>vF9H}gYt0%TBS4* zgALARj&MYKZt=z~dv@*~hC%xzD^0NNTU8Pcd@r-2=B8x15Df=eZX@zTS|s4UQ)=m& zJvVcua?_r_Vb3qURGGPaD3bA{_Vx(nAs`<7uIi>m@nLQOZ z$Q`P=h_zX!#16}|C9>r6oG(Q}Ay+i39G9wmOGf+{32=eC z;Bw%AT2hGPL0dJ!h~T29rY27>CZGW^mIK#=%Br7Z7_34*wM0(mNamuBk&hK%7wuNJ zyEE>bP_k;SjDwTEm52ASGeE=Oq^N72rKwVj1?b zJ+>qfz_$)yZdaHl1IXZ1hKry{Rj6RV!Y6mJ5jM&^N@ zM*=~DlNI^}L~oUGbfnS7eh)h!5@>kPyaIS0QvYj)q}o^Yux_cKTx@4WBRXyyM*SYI zExERW9cds3JCae#4H<=89`+>KobqPBIReMsP;S^9G6v0jzB_L}iuz=9@`tC%lZ!0` z*k-;e+K-%Yk6%Ylo)C(=Ceo+McKo$yU1u9|)csx` z&Eq(h#^gLOJ7mycK%gUNO4#7x@3jnY1WZ6Uf-(j)mf}iD2DA9ZKSZEc%TOqbo)SaF z@9M}G^h1)`{RXKw`hnV8ri=in(ohQ54>(Gb9`*PQ#qn)|5G6a3i>kjAI%|l5d+eBvw*%f|J=2?#U*=>RbOR5 zeqrx0_&fuP!6XB@6%@KQU1;_%F!)6VA7SuO2Cp%|dbR6aOHI;v+^oWqk=jrrZ@cLQ z)xAu1H|~RfT?E(&uhdBluX00X!59{CSj-=zW8LV5_iqsYr9zMwtwRXPT6yp9t0W|2 z=;I`L?ghN)DCULEktMV)H@}1gt}1Rj_{5^n;ew_?R+CL!-AlaKMsbJCJ~T$iOEiCT zwvu8c0wD6=8P1zCP?}l|#F&&hKY@s*R?M8h8hQfK6oT3ldPa&N?^;4o7RL^>)Dr*(w+VelnRnfr>@T_*nN!H=9(&goYX!Ba%AQ5?im;BjDrPk|k# zXD7UjFiS*>HiF{}!Q;k|7z6AALj&a6T_B*s(P|}+eDha2&=W9JK|Mr9s}Yxei}EpI z1bDwwEfYAZ8(L;TZC?d5Ea0XK|t zD-R9Pobz#=#F@5kv4{rSI()l6?7q9u=h8k|V}FOtVMlZWNu^7zi z%BN*b2VI5Qfh;qL!X0?U*Q|C6={~m3!YhlNgncxCjSoHm>Yk~QmYYfI?P?Tmc>yh| zxCQ_+8=~5}5?&N+B2Y%oM7qs-owRPU3*CNlLC#aFMAXUvJjruim>*-U=@DbYH3LBYv5cNT|f@xSmC^$tnKtiHmqy#6W7ifS2TsljD z)9w*td`|Qls8k@)EmP894+Svq8eBplG6Mlv5TanVO@rM&rm9 zTM&52?h5$^Cy8#Jp{RgQ5(^rdC-Wtxnd~9slPzA@V%)-ENEgCWe9}Qo!nC;;kv!W0ANA@9d`Eh!`x%V= z2HWDql>M6weha~Z@D7b@+w6tnkJ}*J#Rfqs`^yM~Dd4e*8*7e3;KgeC@a*4ay{|L) z9abhk66T_JPqUsd!ed=BBDURu#|du)!EWrYA$cE|3?J8VhAke$D#<64lV(k%b~`yG#ZMvqv&%E~!pqhRDJaBCz-F)o`%%zBX~L`1OOf8_i$bfdE98p8 z_=5h1!b^$ z!z|MUqjA4+B!8sP_y1uy{ubu)m)XrN7{z|!LR80ib4~nW2fL#g%3Y;C1oR+^GjqUJ zl=`GnpAbYv!|?m1CZLAaL3vw6MTPwgRmB_0hJrau7BZ>k?-H%W<9}1Nj^c1hX9cK4=JUgg`?-01b61!F2H8iCG9-Z%n8_n~GB3 z*GJK0K$?8WkS2Q%{iFexgYN7*8qQ`gb@|2T2_iQ$4`6hejy!nrQeAxe<^40kX91ka 
zYcSvPOGFYMw_FonmA8>U?mjIA@y{XHJ0a6DR31A@^2Kms#OJ|svCaIg)fKwsl(#II7G-cF~A2-tpYDo7>!~F^|w9pJj-T+L;i!z=sY4SKZNI`cu)!knK%334CpflhE4OBc_cTGJ8Ho9 z1{V4dym5K+1l$5gjVdoO%||VvOWTqr6i^AR)%Yv)!LUm|3J@;S{1PxwI5cEBu8!i(V`7V2$5_D;_bVwc#7z1oX&bf4HOMYa!UXx1Xg(NQqmb z@&h7IkmrK;Y`TE1hnlOtWw*~QHi{PJJc%2%LA6{&A?05qk=b}c4CeF;(s%h zJSf1_M`XvKr9v^)^AHx!QNL;c>Eeqf7|2nIQO2MSjv>`qQL>Bj$s4u@<}W;JxS+LK zXxZ7Lh;uof1r7+`W#QQHNE6!!#*0@AdQf?hz(G&sIH z;#bf&1eFbJ`Z7KzUM%@x|tjrmoD<=_<|9`z9&%eMmp@o5Rl z$QDW)jcj~(h*vjZlE{~`a7k#WE0{LsH?mWfer2WK{nJvcYj6$+wBn4fX2!22;A+W( zCCySsn%E5RSmu}pY zM;7Am$#Q3JEG*gIV0(wbml&L2z#C_Sa8+5X;$*hC^YGb1u%rhjgFsWl5!F!6Euv$OE73uyOkhRReDf-isW8Jy#vPw9+q9 zXGV-j;bL(hN&i<4Ww&WDDWZ4FzRN{Ry(>BJv1E& z-+c(}WJ!F#ER@QRYBZohABTozL|bh?5$TxIw|RLjj)e<31b|Zjw9&@uBL=>w$vtTF zLp$s>+Ha?YPh5>W@ABf6_G!i3Ug7cjwUl)OPdT)|VNrkL4cj0;+H3t!6eg{!Fr?Ki z`Rr->e>+c7fQ($JxM8(Y(XSUU`}B}rW9<73S{`fJ@3^^|>iG7L*u_x<^>7t-pGpu` z+NE+DMBkDA4oj4N%`@dYaji>`Z)-?wG4^E!4>Nct1N!mBI$$G`@=NO1_TvoR!{7n~ z0!sU72687!;Me)qdzx9787wh)k->);P~{TuulARIhRGTOdA)Ipu}?DiJcCCWe1X9i z83_LtcK%H!zr#Qv|6eh7i~+fr{f`J_6XbEN{+?dH-6oM}9?nnu<(p$tmn)z5OOgzH z4L4#ncA2(mtC8!>zS`GWVuyv#<(<(b{lCw6ALkQ)5dnFv{xFPhoqO|q27otkxe{JH zxBoMZL1-X^4~}o9u^dYpT`qV<}Nz;Y(b-PvApWVc`+ zMc>4i@ELpuU%=UoD3o@Ge3|*anc3M*^xqE#Ki|AgOZdBb`2K(oKSQVhazqVs4XO>u zH=xoX*A@1l>OtODXh7A6d=n}I@-2n;6hD9pNn|h_z#Q|pq1uLI=f2$G8G`$^h_1z= zsV%!$D~GVJOrQe%6+(`B+k9cA;00$hAud@V+N=7aXxaL@5E|x?xl9N6b-0w%&%b|@ z>&GDajKgw=v zERW>glw{9aYr1xiu?9y!!gEI1QQ*7OkCGiE;Xm*RhQmd(V#C?z#gZ@B#jsq=>N(?d z)znMTw3L_aaxI28Liw#}+0(y24Fe GllBMO7q0>U diff --git a/repy_v1 b/repy_v1 deleted file mode 160000 index e938a69..0000000 --- a/repy_v1 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e938a694f37771788df1769f95bf02c70fca0e32 diff --git a/repyhelper.py b/repyhelper.py deleted file mode 100644 index a77b344..0000000 --- a/repyhelper.py +++ /dev/null @@ -1,495 +0,0 @@ -""" - - repyhelper.py - - - November 2008 - - - Andreas Sekine - Heavily revised by: Justin Cappos - - - Make porting Repy code to regular python easier. The main interface - is through repyhelper.translate and repyhelper.translate_and_import - - - JAC Note: I wanted to add an interface that allowed a user to specify - a repyhelper path that is separate from the Python path. This seems to - be impossible because you can't always use absolute names when importing - Python modules and because you can't prevent other imports from happening. - This prevent me from writing modules in a location other than the python - path. As jsamuel pointed out, it's not clear how this interacts with - non-relative path names (#291). My solution is to write these files into - the first item in the Python path. - -""" - - -import os # for file checks -import inspect # for fiddling with callstack/module namespaces - -# JAC / JS: to get the Python path -import sys - - -TRANSLATION_TAGLINE = "### Automatically generated by repyhelper.py ###" - - -WARNING_LABEL = """ -### THIS FILE WILL BE OVERWRITTEN! -### DO NOT MAKE CHANGES HERE, INSTEAD EDIT THE ORIGINAL SOURCE FILE -### -### If changes to the src aren't propagating here, try manually deleting this file. 
-### Deleting this file forces regeneration of a repy translation - -""" - - - -class TranslationError(Exception): - """ An error occurred during translation """ - - - -#For keeping a truly shared context between translated files -shared_context = {} -def get_shared_context(): - """ Ensure all imported repy code has a common 'mycontext' dict """ - global shared_context - return shared_context - - -# this specifies where the preprocessed files should end up. By default, they -# will be written to the same directory as they are in. If there is -# a relative path name, it will be written in sys.path[0] -_importcachedir = None - -def set_importcachedir(newimportcachedir): - """ - - Repyhelper creates Python versions of repy files. This function sets - the location where those all files will be stored. By default, files are - stored wherever they are found in the python path. If a relative path - name is specified, by default, files are instead stored in the first - directory in the Python path sys.path[0] (usually the current directory) - - - newimportcachedir: - The location where all files should be stored. Use None to restore - the default behavior - - - TypeError if the path is invalid. - ValueError is thrown if the newimportcachedir isn't in the path - - - None. - - - None. - """ - global _importcachedir - - # handle None... - if newimportcachedir == None: - _importcachedir = None - return - - # else, is this a valid path? - if type(newimportcachedir) != str: - raise TypeError("Type of newimportcachedir '"+str(newimportcachedir)+"' is not a string") - - # If it's an empty string, assume it's '.' - if newimportcachedir == '': - newimportcachedir = '.' - - if not os.path.isdir(newimportcachedir): - raise TypeError("Path given for newimportcachedir '"+str(newimportcachedir)+"' is not a directory") - - - if newimportcachedir not in sys.path: - raise ValueError, "The import cache dir '"+newimportcachedir+"' isn't in the Python path" - - # set the path... We're done. - _importcachedir = newimportcachedir - - - - - - -def set_shared_context(context): - """ - - Set the shared mycontext dictionary - - - context: - A dict to use as the new mycontext - - - TypeError if context is none - - - Creates a python file correspond to the repy file, overwriting previously - generated files that exists with that name - - - The name of the Python module that was created in the current directory. This - string can be used with __import__ to import the translated module. - - """ - global shared_context - if context is None: - raise TypeError("Context can't be none") - shared_context = context - - -#Ensure the generated module has a safe name -# Can't use . in the name because import uses it for scope, so convert to _ -def _get_module_name(repyfilename): - head,tail = os.path.split(repyfilename) - tail = tail.replace('.', '_') - return os.path.join(head, tail) - - -def _translation_is_needed(repyfilename, generatedfile): - """ Checks if generatedfile needs to be regenerated. Does several checks to - decide if generating generatedfilename based on repyfilename is a good idea. - --does file already exist? - --was it automatically generated? - --was it generated from the same source file? - --was the original modified since the last translation? 
- - """ - - if not os.path.isfile(repyfilename): - raise TranslationError("no such file:", repyfilename) - - if not os.path.isfile(generatedfile): - return True - - #Read the first line - try: - fh = open(generatedfile, "r") - first_line = fh.readline().rstrip() - current_line = '' - for line in fh: - current_line = line - last_line = current_line - fh.close() - except IOError, e: - raise TranslationError("Error opening old generated file: " + generatedfile + ": " + str(e)) - - #Check to see if the file was generated by repyhelper, to prevent - #clobbering a file that we didn't create - if not first_line.startswith(TRANSLATION_TAGLINE): - raise TranslationError("File name exists but wasn't automatically generated: " + generatedfile) - - if not last_line.startswith(TRANSLATION_TAGLINE): - # The file generation wasn't completed... I think this means we should - # silently regenerate (#617) - return True - - #Check to see if the generated file has the same original source - old_translation_path = first_line[len(TRANSLATION_TAGLINE):].strip() - generated_abs_path = os.path.abspath(repyfilename) - if old_translation_path != generated_abs_path: - #It doesn't match, but the other file was also a translation! Regen then... - return True - - #If we get here and modification time of orig is older than gen, this is still - #a valid generation - repystat = os.stat(repyfilename) - genstat = os.stat(generatedfile) - if repystat.st_mtime < genstat.st_mtime: - return False - - return True - - -def _generate_python_file_from_repy(repyfilename, generatedfilename, shared_mycontext, callfunc, callargs): - """ Generate a python module from a repy file so it can be imported - The first line is TRANSLATION_TAGLINE, so it's easy to detect that - the file was automatically generated - - """ - - #Start the generation! Print out the header and portability stuff, then include - #the original data and translations - try: - # Create path if it doesn't exist. - # JAC: due to #814, we check for the empty directory too... - if os.path.dirname(generatedfilename) != '' and not os.path.isdir(os.path.dirname(generatedfilename)): - os.makedirs(os.path.dirname(generatedfilename)) - fh = open(generatedfilename, "w") - except IOError, e: - # this is likely a directory permissions error - raise TranslationError("Cannot open file for translation '" + repyfilename + "': " + str(e)) - - # always close the file - try: - print >> fh, TRANSLATION_TAGLINE, os.path.abspath(repyfilename) - print >> fh, WARNING_LABEL - print >> fh, "from repyportability import *" - print >> fh, "from repyportability import _context" - print >> fh, "import repyhelper" - if shared_mycontext: - print >> fh, "mycontext = repyhelper.get_shared_context()" - else: - print >> fh, "mycontext = {}" - print >> fh, "callfunc =", repr(callfunc) - #Properly format the callargs list. 
-    # Assume it only contains Python strings.
-    print >> fh, "callargs =", repr(callargs)
-    print >> fh
-    _process_output_file(fh, repyfilename, generatedfilename)
-    # append the TRANSLATION_TAGLINE so that we can see if the operation was
-    # interrupted (#617)
-    print >> fh
-    print >> fh, TRANSLATION_TAGLINE, os.path.abspath(repyfilename)
-  except IOError, e:
-    raise TranslationError("Error translating file " + repyfilename + ": " + str(e))
-  finally:
-    fh.close()
-
-def _process_output_file(outfh, filename, generatedfilename):
-  """ Read filename and print it to outfh, except convert includes into calls to
-      repyhelper.translate
-  """
-  try:
-    repyfh = open(filename, "r")
-    repyfiledata = repyfh.readlines()
-    repyfh.close()
-  except IOError, e:
-    # Delete the partially translated file, to ensure this partial translation
-    # doesn't get used
-    try:
-      os.remove(generatedfilename)
-    except (IOError, OSError):
-      pass
-    raise TranslationError("Error opening " + filename + ": " + str(e))
-
-  # Having read all the data, let's output it again, performing translations
-  # as needed
-  for line in repyfiledata:
-    # look for includes, and substitute them with calls to translate
-    if line.startswith('include '):
-      includename = line[len('include '):].strip()
-      modulename = _get_module_name(includename)
-      print >> outfh, "repyhelper.translate_and_import('" + includename + "')"
-    else:
-      print >> outfh, line,   # line already includes a newline, so don't add another
-
-
-def translate(filename, shared_mycontext=True, callfunc="import", callargs=None, force_overwrite=False):
-  """
-  <Purpose>
-    Translate a Repy file into a valid Python module that can be imported by
-    the standard "import" statement.
-
-    Creates a Python file corresponding to the repy file in the current directory,
-    with all '.' in the name replaced with "_", and ".py" appended to it to
-    make it a valid Python module name.
-    Performs several checks to only perform a translation when necessary, and
-    to prevent accidentally clobbering other files.
-    The repyhelper and repyportability modules must be in the Python path for
-    the translated files to be importable.
-    Note that the optional arguments used to set variables are only used
-    if the file is retranslated--otherwise they are ignored. To ensure they're
-    used, manually delete the translation to force regeneration.
-
-  <Arguments>
-    filename:
-      A valid repy file name that exists in the Python path (sys.path). If the
-      filename contains a directory separator, it is used instead of the path.
-    shared_mycontext:
-      Optional parameter for whether or not the mycontext of this translation
-      should be shared, or the translation should have its own. Default True.
-    callfunc:
-      Optional parameter for what the callfunc of this translation should be.
-      Should be a valid Python string. Default "import".
-    callargs:
-      A list of strings to use as the repy's "callargs" variable. Default empty
-      list.
-    force_overwrite:
-      If set to True, will skip all file checks and just overwrite any file
-      with the same name as the generated file. Dangerous, so use cautiously.
-      Default False.
-
-  <Exceptions>
-    TranslationError if there was an error during file generation.
-    ValueError if the file can't be found or the directory is invalid.
-
-  <Side Effects>
-    Creates a Python file corresponding to the repy file, overwriting previously
-    generated files that exist with that name.
-
-  <Returns>
-    The name of the Python module that was created in the current directory. This
-    string can be used with __import__ to import the translated module.
-  """
-
-  global _importcachedir
-
-  filedir = None     # The directory the file is in.
-  filenamewithpath = None  # The full path to the file including the filename.
-  destdir = None           # Where the file should be written when generated.
-
-  # If the file name contains a directory, honor that exactly...
-  if filename != os.path.basename(filename):
-    # since the name contains a directory, that's the filename + path
-    filenamewithpath = filename
-
-    # I need to use the absolute path because Python doesn't handle '..' in
-    # directory / module names
-    filedir = os.path.abspath(os.path.dirname(filename))
-
-    # write it to the first directory in the Python path (by default)
-    destdir = sys.path[0]
-
-    # Let's verify these exist, and if not, exit...
-    if not os.path.isdir(filedir):
-      raise ValueError("In repyhelper, the directory '" + filedir + "' does not exist for file '"+filename+"'")
-    if not os.path.isfile(filename):
-      raise ValueError("In repyhelper, the file '" + filename + "' does not exist.")
-
-  else:
-    # Determine in which directory the file is located (using the
-    # Python path)
-    for pathdir in sys.path:
-      possiblefilenamewithpath = os.path.join(pathdir, filename)
-      if os.path.isfile(possiblefilenamewithpath):
-        filenamewithpath = possiblefilenamewithpath
-        filedir = pathdir
-        break
-
-    # make sure we found something.
-    if filenamewithpath is None:
-      raise ValueError("File " + filename + " does not exist in the Python path.")
-    # write it where it was (by default)
-    destdir = filedir
-
-
-  if callargs is None:
-    callargs = []
-
-  # expand the name from foo.repy to foo_repy (change '.' to '_')
-  modulenameonly = _get_module_name(os.path.basename(filename))
-  generatedfilenameonly = modulenameonly + ".py"
-
-  # if it shouldn't be in the default location, put it in the correct dir
-  if _importcachedir is not None:
-    destdir = _importcachedir
-
-  # let's generate it
-  generatedfilenamewithpath = os.path.join(destdir, generatedfilenameonly)
-
-  if force_overwrite or _translation_is_needed(filenamewithpath, generatedfilenamewithpath):
-    _generate_python_file_from_repy(filenamewithpath, generatedfilenamewithpath, shared_mycontext, callfunc, callargs)
-
-  # return the name so that we can import it
-  return modulenameonly
-
-
-
-def translate_and_import(filename, shared_mycontext=True, callfunc="import", callargs=None,
-                         force_overwrite=False, preserve_globals=False):
-  """
-  <Purpose>
-    Translate a repy file to Python (see repyhelper.translate), but also import
-    it to the current global namespace. This import behaves similarly to Python's
-    "from <module> import *", to mimic repy's include semantics, in which
-    included files are in-lined. Globals starting with "_" aren't imported.
-
-  <Arguments>
-    filename:
-      The name of the repy file to translate and import.
-    shared_mycontext:
-      Whether or not the mycontext of this translation should be shared, or
-      the translation should have its own. Default True.
-    callfunc:
-      Optional parameter for what the callfunc of this translation should be.
-      Should be a valid Python string. Default "import".
-    callargs:
-      A list of strings to use as the repy's "callargs" variable. Default empty list.
-    force_overwrite:
-      If set to True, will skip all file checks and just overwrite any file with
-      the same name as the generated file. Dangerous, so use cautiously.
-      Default False.
-    preserve_globals:
-      Whether or not to preserve globals in the current namespace.
-      False means globals in the current context will get overwritten by globals
-      in filename if the names clash; True means to keep current globals in the
-      event of a collision. Default False.
-
-  <Exceptions>
-    TranslationError if there was an error during translation.
-
-  <Side Effects>
-    Creates/updates a Python module corresponding to the repy file argument,
-    and places references to that module in the current global namespace.
-
-  <Returns>
-    None.
-  """
-
-  modulename = translate(filename, shared_mycontext, callfunc, callargs, force_overwrite)
-  _import_file_contents_to_caller_namespace(modulename, preserve_globals)
-
-
-# List of globals to skip; we want to make sure to ignore these when
-# inserting the imported module's vars into the caller's namespace.
-# Could also blacklist the repyportability things here....
-GLOBAL_VARS_BLACKLIST = set(['mycontext', 'callfunc', 'callargs', 'repyhelper'])
-
-def _import_file_contents_to_caller_namespace(modulename, preserve_globals):
-  """
-  Responsible for importing modulename, and taking the contents and
-  injecting them into the caller's namespace. If preserve_globals is set to
-  True, then globals that are already defined in the caller's namespace get
-  skipped.
-
-  Doesn't include objects that start with "_".
-
-  BIG HACK WARNING:
-  The idea here is to use inspect to get a handle to the caller's module, and
-  start inserting elements from the imported module into the caller's global
-  namespace. This is to simulate the repy behavior of inlining includes, which
-  puts everything in the same namespace.
-
-  """
-  #DEBUG
-  #caller_file = os.path.basename(inspect.currentframe().f_back.f_back.f_code.co_filename)
-  #print "*** IMPORTING", modulename, "INTO FILE", caller_file, "***"
-
-
-  # Let Python handle the initial import
-  import_module = __import__(modulename)
-
-  # To get a handle on the caller's module, navigate back up the stack:
-  # go back 2 frames: back to translate_and_import, and another to
-  # whoever called that
-  caller_globals = inspect.currentframe().f_back.f_back.f_globals
-
-
-  # Now iterate over the import's members, and insert them into the
-  # caller's namespace
-  for name,definition in inspect.getmembers(import_module):
-
-    # like normal Python from-imports, don't import names starting with "_"
-    if name.startswith('_'):
-      continue
-
-    # Skip blacklisted items
-    if name in GLOBAL_VARS_BLACKLIST:
-      continue
-
-    # skip already defined vars if told to do so
-    if name in caller_globals and preserve_globals:
-      continue
-
-    caller_globals[name] = definition
-
-
diff --git a/repyhelper.pyc b/repyhelper.pyc
deleted file mode 100644
index 2e92a1f5b0426b9457265f7fdf7fac89068ecb08..0000000000000000000000000000000000000000
Binary files a/repyhelper.pyc and /dev/null differ
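For reference, a minimal sketch of how the two entry points above are meant to
be used together (this assumes a repy file such as serialize.repy sits
somewhere on the Python path; the file name is only illustrative):

  import repyhelper

  # Generates serialize_repy.py alongside the source (or in the import
  # cache dir, if one was set) and copies its public names into this
  # module's globals, mimicking repy's "include" semantics.
  repyhelper.translate_and_import('serialize.repy')

  # Alternatively, translate only, then import the generated module by hand.
  modulename = repyhelper.translate('serialize.repy')   # returns "serialize_repy"
  serialize_repy = __import__(modulename)

Re-running either call skips regeneration when the generated file is newer
than its source, per _translation_is_needed() above.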
diff --git a/repyportability.py b/repyportability.py
deleted file mode 100644
index ef48be7..0000000
--- a/repyportability.py
+++ /dev/null
@@ -1,313 +0,0 @@
-
-
-import __builtin__
-
-# I'm importing these so I can neuter the calls so that they aren't
-# restricted...
-
-import os
-import sys
-import safe
-import nanny
-import emulfile
-import emulmisc
-import namespace
-import nonportable
-import virtual_namespace
-
-# WTF!?! repyportability uses repyhelper to import dylink!?!
-import repyhelper
-
-# JAC: Save the calls in case I want to restore them. This is useful if
-# repy ends up wanting to use either repyportability or repyhelper...
-# This is also useful if a user wants to enforce restrictions on the repy
-# code they import via repyhelper (they must use
-# restrictions.init_restriction_tables(filename) as well)...
-oldrestrictioncalls = {}
-oldrestrictioncalls['nanny.tattle_quantity'] = nanny.tattle_quantity
-oldrestrictioncalls['nanny.tattle_add_item'] = nanny.tattle_add_item
-oldrestrictioncalls['nanny.tattle_remove_item'] = nanny.tattle_remove_item
-oldrestrictioncalls['nanny.is_item_allowed'] = nanny.is_item_allowed
-oldrestrictioncalls['nanny.get_resource_limit'] = nanny.get_resource_limit
-oldrestrictioncalls['nanny._resources_allowed_dict'] = nanny._resources_allowed_dict
-oldrestrictioncalls['nanny._resources_consumed_dict'] = nanny._resources_consumed_dict
-oldrestrictioncalls['emulfile.assert_is_allowed_filename'] = emulfile._assert_is_allowed_filename
-
-
-port_list = range(60000, 65000)
-
-default_restrictions = {'loopsend': 100000000.0, 'netrecv': 1000000.0, 'random': 10000.0, 'insockets': 500.0, 'fileread': 10000000.0, 'netsend': 1000000.0, 'connport': set(port_list), 'messport': set(port_list), 'diskused': 10000000000.0, 'filewrite': 10000000.0, 'lograte': 3000000.0, 'filesopened': 500.0, 'looprecv': 100000000.0, 'events': 1000.0, 'memory': 150000000000.0, 'outsockets': 500.0, 'cpu': 1.0, 'threadcpu' : 1.0}
-
-
-resource_used = {'diskused': 0.0, 'renewable_update_time': {'fileread': 0.0, 'loopsend': 0.0, 'lograte': 0.0, 'netrecv': 0.0, 'random': 0.0, 'filewrite': 0.0, 'looprecv': 0.0, 'netsend': 0.0, 'cpu': 0.0}, 'fileread': 0.0, 'loopsend': 0.0, 'filesopened': set([]), 'lograte': 0.0, 'netrecv': 0.0, 'random': 0.0, 'insockets': set([]), 'filewrite': 0.0, 'looprecv': 0.0, 'events': 0.0, 'messport': set([]), 'memory': 0.0, 'netsend': 0.0, 'connport': set([]), 'outsockets': set([]), 'cpu': 0.0, 'threadcpu' : 1.0}
-
-def _do_nothing(*args):
-  pass
-
-def _always_true(*args):
-  return True
-
-
-# Overwrite the calls so that I don't have restrictions (the default)
-def override_restrictions():
-  """
-  <Purpose>
-    Turns off restrictions. Resource use will be unmetered after making
-    this call. (Note that CPU / memory / disk space will never be metered
-    by repyhelper or repyportability.)
-
-  <Arguments>
-    None.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Resource use is unmetered / calls are unrestricted.
-
-  <Returns>
-    None.
-  """
-  nonportable.get_resources = _do_nothing
-
-  nanny.tattle_quantity = _do_nothing
-  nanny.tattle_add_item = _do_nothing
-  nanny.tattle_remove_item = _do_nothing
-  nanny.is_item_allowed = _always_true
-  nanny.get_resource_limit = _do_nothing
-  nanny._resources_allowed_dict = default_restrictions
-  nanny._resources_consumed_dict = resource_used
-  emulfile._assert_is_allowed_filename = _do_nothing
-
-
-
-# Sets up restrictions for the program.
-# THIS IS ONLY METERED FOR REPY CALLS AND DOES NOT INCLUDE CPU / MEM / DISK
-# SPACE
-def initialize_restrictions(restrictionsfn):
-  """
-  <Purpose>
-    Sets up restrictions. This allows some resources to be metered
-    despite the use of repyportability / repyhelper. CPU / memory / disk
-    space will not be metered. Call restrictions will also be enabled.
-
-  <Arguments>
-    restrictionsfn:
-      The file name of the restrictions file.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Enables restrictions.
-
-  <Returns>
-    None.
-  """
-  nanny.start_resource_nanny(restrictionsfn)
-
-def enable_restrictions():
-  """
-  <Purpose>
-    Turns on restrictions. There must have previously been a call to
-    initialize_restrictions(). CPU / memory / disk space will not be
-    metered. Call restrictions will also be enabled.
-
-  <Arguments>
-    None.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Enables call restrictions / resource metering.
-
-  <Returns>
-    None.
-  """
-  # JAC: THIS WILL NOT ENABLE CPU / MEMORY / DISK SPACE
-  nanny.tattle_quantity = oldrestrictioncalls['nanny.tattle_quantity']
-  nanny.tattle_add_item = oldrestrictioncalls['nanny.tattle_add_item']
-  nanny.tattle_remove_item = oldrestrictioncalls['nanny.tattle_remove_item']
-  nanny.is_item_allowed = oldrestrictioncalls['nanny.is_item_allowed']
-  nanny.get_resource_limit = oldrestrictioncalls['nanny.get_resource_limit']
-  nanny._resources_allowed_dict = oldrestrictioncalls['nanny._resources_allowed_dict']
-  nanny._resources_consumed_dict = oldrestrictioncalls['nanny._resources_consumed_dict']
-  # Restore the underscore-prefixed hook that override_restrictions() replaces.
-  emulfile._assert_is_allowed_filename = oldrestrictioncalls['emulfile.assert_is_allowed_filename']
-
-# from virtual_namespace import VirtualNamespace
-# We need more of the module than just the VirtualNamespace
-from virtual_namespace import *
-from safe import *
-from emulmisc import *
-from emulcomm import *
-from emulfile import *
-from emultimer import *
-
-# Build the _context and usercontext dicts.
-# These will be the functions and variables in the user's namespace (along
-# with the builtins allowed by the safe module).
-usercontext = {'mycontext':{}}
-
-# Add to the user's namespace wrapped versions of the API functions we make
-# available to the untrusted user code.
-namespace.wrap_and_insert_api_functions(usercontext)
-
-# Convert the usercontext from a dict to a SafeDict
-usercontext = safe.SafeDict(usercontext)
-
-# Allow some introspection by providing a reference to the context
-usercontext["_context"] = usercontext
-usercontext["getresources"] = nonportable.get_resources
-usercontext["createvirtualnamespace"] = virtual_namespace.createvirtualnamespace
-usercontext["getlasterror"] = emulmisc.getlasterror
-_context = usercontext.copy()
-
-# This is needed because otherwise we're using the old versions of file and
-# open. We should change the names of these functions when we design
-# repy 0.2
-originalopen = open
-originalfile = file
-openfile = emulated_open
-
-# file command discontinued in repy V2
-#file = emulated_open
-
-# Create a mock copy of getresources()
-def getresources():
-  return (default_restrictions, resource_used, [])
-
-# Needed for ticket #1038.
-# `safe._builtin_destroy()` normally removes the ability to call `import`. -# it would be called inside of `createvirtualnamespace()` -# If we didn't do this, we would not be able to call `import` after -# calling `createvirtualnamespace()` -for builtin_type in dir(__builtin__): - if builtin_type not in safe._BUILTIN_OK: - safe._BUILTIN_OK.append(builtin_type) - - -def initialize_safe_module(): - """ - A helper private function that helps initialize - the safe module. - """ - - # Allow Import Errors. - safe._NODE_CLASS_OK.append("Import") - - # needed to allow primitive marshalling to be built - safe._BUILTIN_OK.append("__import__") - safe._BUILTIN_OK.append("open") - safe._BUILTIN_OK.append("eval") - - - # Allow all built-ins - for builtin_type in dir(__builtins__): - if builtin_type not in safe._BUILTIN_OK: - safe._BUILTIN_OK.append(builtin_type) - - for str_type in dir(__name__): - if str_type not in safe._STR_OK: - safe._STR_OK.append(str_type) - - safe.serial_safe_check = _do_nothing - safe._check_node = _do_nothing - - - -# Override by default! -override_restrictions() -initialize_safe_module() - - - - -# This function makes the dy_* functions available. -def add_dy_support(_context): - """ - - Enable usage of repy's dynamic library linking. This should only - be called on the module-level. - - - _context: - The context that dylink's functions should be inserted into. - - - Public functions from dylink.repy will be inserted into _context. - _context should be globals() for a module. - - - Exception is raised when a module import fails. - - - None - """ - # Add dylink support - repyhelper.translate_and_import("dylink.r2py", callfunc = 'initialize') - - # The dy_* functions are only added to the namespace after init_dylink is called. - init_dylink(_context,{}) - - original_import_module = _context['dy_import_module'] - - def _new_dy_import_module_symbols(module, callfunc="import"): - # Remember the path we are currently in. We need to change to - # this script's dir (assuming it also contains dylink.r2py and - # rest of the Repy runtime and libraries) so that dylink is - # able to link in code from the runtime. - # This is required due to Repy safety measures that inhibit - # dylink to access files outside of its directory. - # Once dylink is done, we return to the previously-current - # working dir. - previous_cwd = os.getcwd() - repyportability_dir = os.path.dirname(os.path.realpath(__file__)) - os.chdir(repyportability_dir) - - # If we are using repyportability, we want to check all pythonpath for - # the file we are looking to import. - COMMON_EXTENSIONS = ["", ".py", ".repy",".py.repy", ".pp", ".r2py"] - - # Check all combination of filepath with file extension and try to import the - # file if we have found it. - for pathdir in sys.path: - possiblefilenamewithpath = os.path.join(pathdir, module) - - # If we have found a path, then we can import the module and - # return so we do not continue to look in other paths. - if os.path.isfile(possiblefilenamewithpath): - filenamewithpath = possiblefilenamewithpath - importedmodule = original_import_module(filenamewithpath, callfunc) - os.chdir(previous_cwd) - return importedmodule - - # If we don't find the file, we just call down to dylink, and - # let it raise the appropriate error. 
-    # The bare try/finally suffices here: any error from dylink propagates
-    # unchanged, and we always restore the working directory.
-    try:
-      importedmodule = original_import_module(module, callfunc)
-      return importedmodule
-    finally:
-      os.chdir(previous_cwd)
-
-  _context['dy_import_module'] = _new_dy_import_module_symbols
-
-
-  # Make our own `dy_import_module_symbols` and add it to the context.
-  # It is not currently possible to use the real one (details at ticket #1046)
-  def _dy_import_module_symbols(module, new_callfunc="import"):
-    new_context = _context['dy_import_module'](module, new_callfunc)._context
-    # Copy the new symbols into our namespace.
-    for symbol in new_context:
-      if symbol not in _context:   # Prevent the imported object from destroying our namespace.
-        _context[symbol] = new_context[symbol]
-
-
-
-  _context['dy_import_module_symbols'] = _dy_import_module_symbols
-
-
-
diff --git a/repyportability.pyc b/repyportability.pyc
deleted file mode 100644
index 81bc312ec93fd6f4cc2c4101d7d635d17623c065..0000000000000000000000000000000000000000
Binary files a/repyportability.pyc and /dev/null differ
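For reference, a minimal sketch of how this module is meant to be consumed.
The repy library names below are illustrative, and the Seattle runtime files
must sit next to repyportability.py for this to actually run:

  from repyportability import *
  add_dy_support(locals())   # at module level, locals() is globals()

  # dylink's dy_* calls are now available in this namespace:
  dy_import_module_symbols("random.r2py")          # in-lines public symbols
  advertisepipe = dy_import_module("advertisepipe.r2py")

Restrictions stay disabled throughout, since override_restrictions() runs at
import time; call initialize_restrictions() and enable_restrictions() to get
call metering back.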
diff --git a/safe.py b/safe.py
deleted file mode 100644
index d68417a..0000000
--- a/safe.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""An attempt at creating a safe_exec for Python.
-
-This file is public domain and is not suited for any serious purpose.
-This code is not guaranteed to work. Use at your own risk!
-Beware! Trust no one!
-
-Please e-mail philhassey@yahoo.com if you find any security holes.
-
-Known limitations:
- - Safe doesn't have any testing for timeouts/DoS. One-liners
-   like these will lock up the system: "while 1: pass", "234234**234234"
- - Lots of (likely) safe builtins and safe AST Nodes are not allowed.
-   I suppose you can add them to the whitelist if you want them. I
-   trimmed it down as much as I thought I could get away with and still
-   have useful Python code.
- - Might not work with future versions of Python - this is made with
-   Python 2.4 in mind. _STR_NOT_BEGIN might have to be extended
-   in the future with more magic variable prefixes. Or you can
-   switch to conservative mode, but then even variables like "my_var"
-   won't work, which is sort of a nuisance.
- - If you get data back from a safe_exec, don't call any functions
-   or methods - they might not be safe with __builtin__ restored
-   to its normal state. Work with them again via an additional safe_exec.
- - The "context" sent to the functions is not tested at all. If you
-   pass in a dangerous function {'myfile':file} the code will be able
-   to call it.
-"""
-
-# Built-in Objects
-# http://docs.python.org/lib/builtin.html
-
-# AST Nodes - compiler
-# http://docs.python.org/lib/module-compiler.ast.html
-
-# Types and members - inspection
-# http://docs.python.org/lib/inspect-types.html
-# The standard type hierarchy
-# http://docs.python.org/ref/types.html
-
-# Based loosely on - Restricted "safe" eval - by Babar K. Zafar
-# (it isn't very safe, but it got me started)
-# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496746
-
-# Securing Python: Controlling the abilities of the interpreter
-# (or - why even trying this is likely to end in tears)
-# http://us.pycon.org/common/talkdata/PyCon2007/062/PyCon_2007.pdf
-
-# Changes
-# 2007-03-13: added test for unicode strings that contain __, etc
-# 2007-03-09: renamed safe_eval to safe_exec, since that's what it is.
-# 2007-03-09: use "exec code in context", because of test_misc_recursive_fnc
-# 2007-03-09: Removed 'type' from _BUILTIN_OK - see test_misc_type_escape
-# 2007-03-08: Cleaned up the destroy / restore mechanism, added more tests
-# 2007-03-08: Fixed how contexts work.
-# 2007-03-07: Added test for global node -# 2007-03-07: Added test for SyntaxError -# 2007-03-07: Fixed an issue where the context wasn't being reset (added test) -# 2007-03-07: Added unittest for dir() -# 2007-03-07: Removed 'isinstance', 'issubclass' from builtins whitelist -# 2007-03-07: Removed 'EmptyNode', 'Global' from AST whitelist -# 2007-03-07: Added import __builtin__; s/__builtins__/__builtin__ - -import UserDict # This is to get DictMixin -import threading # This is to get a lock -import time # This is to sleep -import subprocess # This is to start the external process -import harshexit # This is to kill the external process on timeout -import nonportable # This is to get the current runtime -import os # This is for some path manipulation -import repy_constants # This is to get our start-up directory -import safety_exceptions # This is for exception classes shared with tracebackrepy - -# Hide the DeprecationWarning for compiler -import warnings -warnings.simplefilter('ignore') -import compiler # Required for the code safety check -warnings.resetwarnings() - -import platform # This is for detecting Nokia tablets -import __builtin__ -import sys - -# Armon: This is how long we will wait for the external process -# to validate the safety of the user code before we timeout, -# and exit with an exception -# JAC: I've increased this to mitigate #744 -EVALUTATION_TIMEOUT = 15 - -if platform.machine().startswith('armv'): - # The Nokia needs more time to evaluate code safety, especially - # when under heavy loads - EVALUTATION_TIMEOUT = 200 - -_NODE_CLASS_OK = [ - 'Add', 'And', 'AssAttr', 'AssList', 'AssName', 'AssTuple', - 'Assert', 'Assign','AugAssign', 'Bitand', 'Bitor', 'Bitxor', 'Break', - 'CallFunc', 'Class', 'Compare', 'Const', 'Continue', - 'Dict', 'Discard', 'Div', 'Ellipsis', 'Expression', 'FloorDiv', - 'For', 'Function', 'Getattr', 'If', 'Keyword', - 'LeftShift', 'List', 'ListComp', 'ListCompFor', 'ListCompIf', 'Mod', - 'Module', 'Mul', 'Name', 'Node', 'Not', 'Or', 'Pass', 'Power', - 'Print', 'Printnl', 'Return', 'RightShift', 'Slice', 'Sliceobj', - 'Stmt', 'Sub', 'Subscript', 'Tuple', 'UnaryAdd', 'UnarySub', 'While', - ] -_NODE_ATTR_OK = [] -_STR_OK = ['__init__'] -# Disallow these due to the potential for encoding bugs (#982) -_STR_BAD = ['encode','decode'] -_STR_NOT_CONTAIN = ['__'] -_STR_NOT_BEGIN = ['im_','func_','tb_','f_','co_',] - -## conservative settings -#_NODE_ATTR_OK = ['flags'] -#_STR_NOT_CONTAIN = ['_'] -#_STR_NOT_BEGIN = [] - -# Checks the string safety -def _is_string_safe(token): - """ - - Checks if a string is safe based on the defined rules. - - - token: A value to check. 
- - - True if token is safe, false otherwise - """ - - # Check if it is explicitly allowed or the wrong type - if type(token) is not str and type(token) is not unicode: - return True - if token in _STR_BAD: - return False - if token in _STR_OK: - return True - - # Check all the prohibited sub-strings - for forbidden_substring in _STR_NOT_CONTAIN: - if forbidden_substring in token: - return False - - # Check all the prohibited prefixes - for forbidden_prefix in _STR_NOT_BEGIN: - if token[:len(forbidden_prefix)] == forbidden_prefix: - return False - - # Safe otherwise - return True - - -def _check_node(node): - if node.__class__.__name__ not in _NODE_CLASS_OK: - raise safety_exceptions.CheckNodeException(node.lineno,node.__class__.__name__) - for k,v in node.__dict__.items(): - # Don't allow the construction of unicode literals - if type(v) == unicode: - raise safety_exceptions.CheckStrException(node.lineno,k,v) - - if k in _NODE_ATTR_OK: continue - - # JAC: don't check doc strings for __ and the like... - if k == 'doc' and (node.__class__.__name__ in ['Module', 'Function', 'Class']): - continue - - - # Check the safety of any strings - if not _is_string_safe(v): - raise safety_exceptions.CheckStrException(node.lineno,k,v) - - for child in node.getChildNodes(): - _check_node(child) - -def _check_ast(code): - ast = compiler.parse(code) - _check_node(ast) - -_type = type -_compile_type = _type(compile('','','exec')) - -def safe_type(*args, **kwargs): - if len(args) != 1 or kwargs: - raise safety_exceptions.RunBuiltinException( - 'type() may only take exactly one non-keyword argument.') - - # Fix for #1189 - if _type(args[0]) is _type or _type(args[0]) is _compile_type: - raise exception_hierarchy.RunBuiltinException( - 'unsafe type() call.') - - return _type(args[0]) - -_BUILTIN_REPLACE = { - 'type' : safe_type -} - -# r = [v for v in dir(__builtin__) if v[0] != '_' and v[0] == v[0].upper()] ; r.sort() ; print r -_BUILTIN_OK = [ - '__debug__','quit','exit', - - 'ArithmeticError', 'AssertionError', 'AttributeError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False', 'FloatingPointError', 'FutureWarning', 'IOError', 'ImportError', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'None', 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError', - - 'abs', 'bool', 'cmp', 'complex', 'dict', 'divmod', 'filter', 'float', 'frozenset', 'hex', 'id', 'int', 'len', 'list', 'long', 'map', 'max', 'min', 'object', 'oct', 'pow', 'range', 'reduce', 'repr', 'round', 'set', 'slice', 'str', 'sum', 'tuple', 'xrange', 'zip', - ] - -#this is zope's list... 
- #in ['False', 'None', 'True', 'abs', 'basestring', 'bool', 'callable', - #'chr', 'cmp', 'complex', 'divmod', 'float', 'hash', - #'hex', 'id', 'int', 'isinstance', 'issubclass', 'len', - #'long', 'oct', 'ord', 'pow', 'range', 'repr', 'round', - #'str', 'tuple', 'unichr', 'unicode', 'xrange', 'zip']: - - -_BUILTIN_STR = [ - 'copyright','credits','license','__name__','__doc__', - ] - -def _builtin_fnc(k): - def fnc(*vargs,**kargs): - raise safety_exceptions.RunBuiltinException(k) - return fnc -_builtin_globals = None -_builtin_globals_r = None -def _builtin_init(): - global _builtin_globals, _builtin_globals_r - if _builtin_globals != None: return - _builtin_globals_r = __builtin__.__dict__.copy() - r = _builtin_globals = {} - for k in __builtin__.__dict__.keys(): - v = None - # It's important to check _BUILTIN_REPLACE before _BUILTIN_OK because - # even if the name is defined in both, there must be a security reason - # why it was supposed to be replaced, not just allowed. - if k in _BUILTIN_REPLACE: v = _BUILTIN_REPLACE[k] - elif k in _BUILTIN_OK: v = __builtin__.__dict__[k] - elif k in _BUILTIN_STR: v = '' - else: v = _builtin_fnc(k) - r[k] = v - - # Armon: Make SafeDict available - _builtin_globals["SafeDict"] = get_SafeDict - -def _builtin_destroy(): - _builtin_init() - for k,v in _builtin_globals.items(): - __builtin__.__dict__[k] = v -def _builtin_restore(): - for k,v in _builtin_globals_r.items(): - __builtin__.__dict__[k] = v - - - -# Get a lock for serial_safe_check -SAFE_CHECK_LOCK = threading.Lock() - -# Wraps safe_check to serialize calls -def serial_safe_check(code): - """ - - Serializes calls to safe_check. This is because safe_check forks a new process - which may take many seconds to return. This prevents us from forking many new - python processes. - - - code: See safe_check. - - - As with safe_check. - - - See safe_check. 
- """ - # Acquire the lock - SAFE_CHECK_LOCK.acquire() - - try: - # Call safe check - return safe_check(code) - - finally: - # Release - SAFE_CHECK_LOCK.release() - - -def safe_check(code): - """Check the code to be safe.""" - # NOTE: This code will not work in Windows Mobile due to the reliance on subprocess - - # Get the path to safe_check.py by using the original start directory of python - path_to_safe_check = os.path.join(repy_constants.REPY_START_DIR, "safe_check.py") - - # Start a safety check process, reading from the user code and outputing to a pipe we can read - proc = subprocess.Popen([sys.executable, path_to_safe_check],stdin=subprocess.PIPE, stdout=subprocess.PIPE) - - # Write out the user code, close so the other end gets an EOF - proc.stdin.write(code) - proc.stdin.close() - - # Wait for the process to terminate - starttime = nonportable.getruntime() - status = None - - # Only wait up to EVALUTATION_TIMEOUT seconds before terminating - while status == None and (nonportable.getruntime() - starttime < EVALUTATION_TIMEOUT): - status = proc.poll() - time.sleep(0.02) - - else: - # Check if the process is still running - if status == None: - # Try to terminate the external process - try: - harshexit.portablekill(proc.pid) - except: - pass - - # Raise an exception - raise Exception, "Evaluation of code safety exceeded timeout threshold ("+str(nonportable.getruntime() - starttime)+" seconds)" - - - # Read the output and close the pipe - rawoutput = proc.stdout.read() - proc.stdout.close() - - # Interim fix for #1080: Get rid of stray debugging output on Android - # of the form "dlopen libpython2.6.so" and "dlopen /system/lib/libc.so", - # yet preserve all of the other output (including empty lines). - - output = "" - for line in rawoutput.split("\n"): - # Preserve empty lines - if line == "": - output += "\n" - continue - # Suppress debug messages we know can turn up - wordlist = line.split() - if wordlist[0]=="dlopen": - if wordlist[-1]=="/system/lib/libc.so": - continue - if wordlist[-1].startswith("libpython") and \ - wordlist[-1].endswith(".so"): - # We expect "libpython" + version number + ".so". - # The version number should be a string convertible to float. - # If it's not, raise an exception. - try: - versionstring = (wordlist[-1].replace("libpython", - "")).replace(".so", "") - junk = float(versionstring) - except TypeError, ValueError: - raise Exception("Unexpected debug output '" + line + - "' while evaluating code safety!") - else: - output += line + "\n" - - # Strip off the last newline character we added - output = output[0:-1] - - # Check the output, None is success, else it is a failure - if output == "None": - return True - - # If there is no output, this is a fatal error condition - elif output == "": - raise Exception, "Fatal error while evaluating code safety!" - - else: - # Raise the error from the output - raise safety_exceptions.SafeException, output - - -# Have the builtins already been destroyed? 
-BUILTINS_DESTROYED = False - -def safe_run(code,context=None): - """Exec code with only safe builtins on.""" - global BUILTINS_DESTROYED - if context == None: context = {} - - # Destroy the builtins if needed - if not BUILTINS_DESTROYED: - BUILTINS_DESTROYED = True - _builtin_destroy() - - try: - #exec code in _builtin_globals,context - context['__builtins__'] = _builtin_globals - exec code in context - #_builtin_restore() - except: - #_builtin_restore() - raise - -def safe_exec(code,context = None): - """Check the code to be safe, then run it with only safe builtins on.""" - serial_safe_check(code) - safe_run(code,context) - - -# Functional constructor for SafeDict -def get_SafeDict(*args,**kwargs): - return SafeDict(*args,**kwargs) - -# Safe dictionary, which prohibits "bad" keys -class SafeDict(UserDict.DictMixin): - """ - - A dictionary implementation which prohibits "unsafe" keys - from being set or get. - """ - - def __init__(self,from_dict=None): - # Create the underlying dictionary - self.__under__ = {} - - # Break if we are done... - if from_dict is None: - return - if type(from_dict) is not dict and not isinstance(from_dict,SafeDict): - return - - # If we are given a dict, try to copy its keys - for key,value in from_dict.items(): - # Skip __builtins__ and __doc__ since safe_run/python inserts that - if key in ["__builtins__","__doc__"]: - continue - - # Check the key type - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" - - # Check if the key is safe - if _is_string_safe(key): - self.__under__[key] = value - - # Throw an exception if the key is unsafe - else: - raise ValueError, "Unsafe key: '"+key+"'" - - # Allow getting items - def __getitem__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" - if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" - - return self.__under__.__getitem__(key) - - # Allow setting items - def __setitem__(self,key,value): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" - if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" - - return self.__under__.__setitem__(key,value) - - # Allow deleting items - def __delitem__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" - if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" - - return self.__under__.__delitem__(key) - - # Allow checking if a key is set - def __contains__(self,key): - if type(key) is not str and type(key) is not unicode: - raise TypeError, "'SafeDict' keys must be of string type!" 
- if not _is_string_safe(key): - raise ValueError, "Unsafe key: '"+key+"'" - - return self.__under__.__contains__(key) - - # Return the key set - def keys(self): - # Get the keys from the underlying dict - keys = self.__under__.keys() - - # Filter out the unsafe keys - safe_keys = [] - - for key in keys: - if _is_string_safe(key): - safe_keys.append(key) - - # Return the safe keys - return safe_keys - - - # allow us to be printed - # this gets around the __repr__ infinite loop issue ( #918 ) for simple cases - # It seems unlikely this is adequate for more complex cases (like safedicts - # that refer to each other) - def __repr__(self): - newdict = {} - for safekey in self.keys(): - if self.__under__[safekey] == self: - newdict[safekey] = newdict - else: - newdict[safekey] = self.__under__[safekey] - return newdict.__repr__() - - # Allow a copy of us - def copy(self): - # Create a new instance - copy_inst = SafeDict(self.__under__) - - # Return a new instance - return copy_inst - - # Make our fields read-only - # This means __getattr__ can do its normal thing, but any - # setters need to be overridden to prohibit adding/deleting/updating - - def __setattr__(self,name,value): - # Allow setting __under__ on initialization - if name == "__under__" and name not in self.__dict__: - self.__dict__[name] = value - return - - raise TypeError,"'SafeDict' attributes are read-only!" - - def __delattr__(self,name): - raise TypeError,"'SafeDict' attributes are read-only!" - - -if __name__ == '__main__': - import unittest - - class TestSafe(unittest.TestCase): - def test_check_node_import(self): - self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"import os") - def test_check_node_from(self): - self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"from os import *") - def test_check_node_exec(self): - self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"exec 'None'") - def test_check_node_raise(self): - self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"raise Exception") - def test_check_node_global(self): - self.assertRaises(safety_exceptions.CheckNodeException,safe_exec,"global abs") - - def test_check_str_x(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"x__ = 1") - def test_check_str_str(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"x = '__'") - def test_check_str_class(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"None.__class__") - def test_check_str_func_globals(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"def x(): pass; x.func_globals") - def test_check_str_init(self): - safe_exec("def __init__(self): pass") - def test_check_str_subclasses(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"object.__subclasses__") - def test_check_str_properties(self): - code = """ -class X(object): - def __get__(self,k,t=None): - 1/0 -""" - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,code) - def test_check_str_unicode(self): - self.assertRaises(safety_exceptions.CheckStrException,safe_exec,"u'__'") - - def test_run_builtin_open(self): - self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"open('test.txt','w')") - def test_run_builtin_getattr(self): - self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"getattr(None,'x')") - def test_run_builtin_abs(self): - safe_exec("abs(-1)") - def test_run_builtin_open_fnc(self): - def test(): - f = open('test.txt','w') - 
self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"test()",{'test':test}) - def test_run_builtin_open_context(self): - #this demonstrates how python jumps into some mystical - #restricted mode at this point .. causing this to throw - #an IOError. a bit strange, if you ask me. - self.assertRaises(IOError,safe_exec,"test('test.txt','w')",{'test':open}) - def test_run_builtin_type_context(self): - #however, even though this is also a very dangerous function - #python's mystical restricted mode doesn't throw anything. - safe_exec("test(1)",{'test':type}) - def test_run_builtin_dir(self): - self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"dir(None)") - - def test_run_exeception_div(self): - self.assertRaises(ZeroDivisionError,safe_exec,"1/0") - def test_run_exeception_i(self): - self.assertRaises(ValueError,safe_exec,"(-1)**0.5") - - def test_misc_callback(self): - self.value = None - def test(): self.value = 1 - safe_exec("test()", {'test':test}) - self.assertEqual(self.value, 1) - def test_misc_safe(self): - self.value = None - def test(v): self.value = v - code = """ -class Test: - def __init__(self,value): - self.x = value - self.y = 4 - def run(self): - for n in xrange(0,34): - self.x += n - self.y *= n - return self.x+self.y -b = Test(value) -r = b.run() -test(r) -""" - safe_exec(code,{'value':3,'test':test}) - self.assertEqual(self.value, 564) - - def test_misc_context_reset(self): - #test that local contact is reset - safe_exec("abs = None") - safe_exec("abs(-1)") - safe_run("abs = None") - safe_run("abs(-1)") - - def test_misc_syntax_error(self): - self.assertRaises(SyntaxError,safe_exec,"/") - - def test_misc_context_switch(self): - self.value = None - def test(v): self.value = v - safe_exec(""" -def test2(): - open('test.txt','w') -test(test2) -""",{'test':test}) - self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,"test()",{'test':self.value}) - - def test_misc_context_junk(self): - #test that stuff isn't being added into *my* context - #except what i want in it.. - c = {} - safe_exec("b=1",c) - self.assertEqual(c['b'],1) - - def test_misc_context_later(self): - #honestly, i'd rec that people don't do this, but - #at least we've got it covered ... 
-      c = {}
-      safe_exec("def test(): open('test.txt','w')",c)
-      self.assertRaises(safety_exceptions.RunBuiltinException,c['test'])
-
-    #def test_misc_test(self):
-      #code = "".join(open('test.py').readlines())
-      #safe_check(code)
-
-    def test_misc_builtin_globals_write(self):
-      #check that a user can't modify the special _builtin_globals stuff
-      safe_exec("abs = None")
-      self.assertNotEqual(_builtin_globals['abs'],None)
-
-    #def test_misc_builtin_globals_used(self):
-      ##check that the same builtin globals are always used
-      #c1,c2 = {},{}
-      #safe_exec("def test(): pass",c1)
-      #safe_exec("def test(): pass",c2)
-      #self.assertEqual(c1['test'].func_globals,c2['test'].func_globals)
-      #self.assertEqual(c1['test'].func_globals,_builtin_globals)
-
-    def test_misc_builtin_globals_used(self):
-      #check that the same builtin globals are always used
-      c = {}
-      safe_exec("def test1(): pass",c)
-      safe_exec("def test2(): pass",c)
-      self.assertEqual(c['test1'].func_globals,c['test2'].func_globals)
-      self.assertEqual(c['test1'].func_globals['__builtins__'],_builtin_globals)
-      self.assertEqual(c['__builtins__'],_builtin_globals)
-
-    def test_misc_type_escape(self):
-      #tests that 'type' isn't allowed anymore
-      #with type defined, you could create magical classes like this:
-      code = """
-def delmethod(self): 1/0
-foo=type('Foo', (object,), {'_' + '_del_' + '_':delmethod})()
-foo.error
-"""
-      try:
-        self.assertRaises(safety_exceptions.RunBuiltinException,safe_exec,code)
-      finally:
-        pass
-
-    def test_misc_recursive_fnc(self):
-      code = "def test():test()\ntest()"
-      self.assertRaises(RuntimeError,safe_exec,code)
-
-
-  unittest.main()
-
-  #safe_exec('print locals()')
-
diff --git a/safe.pyc b/safe.pyc
deleted file mode 100644
index 9b6d030ba2810e80a11704bc654432713130b603..0000000000000000000000000000000000000000
Binary files a/safe.pyc and /dev/null differ
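For reference, a minimal sketch of the intended use of this module, drawn from
its own unit tests above (it must run with the repy runtime files present,
since safe_check() shells out to safe_check.py in a subprocess):

  import safe
  import safety_exceptions

  context = {}
  safe.safe_exec("b = 1", context)   # vetted by safe_check, then executed
  assert context['b'] == 1           # results come back via the context dict

  try:
    safe.safe_exec("import os")      # 'Import' is not a whitelisted AST node
  except safety_exceptions.CheckNodeException:
    pass                             # rejected before execution, as expected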
diff --git a/send_gmail.py b/send_gmail.py
[index header lost in the source; the hunk below deletes send_gmail.py]
--- a/send_gmail.py
+++ /dev/null
-"""
-  send_gmail.py
-
-  December 17, 2008
-
-  ivan@cs.washington.edu
-  Ivan Beschastnikh
-
-  Sends an email using an existing gmail account
-
-  This script can be run from the command line to generate a test
-  email. The command line usage is:
-  $ python send_gmail.py [gmail_user] [gmail_pwd] [to] [subj] [body] [attach]
-  where all the arguments are strings and attach is a path to a
-  readable file or is missing (for no attachment).
-
-  As an import, this file should be used as follows:
-
-  First, initialize the global username and password variables by
-  calling init_gmail(gmail_user, gmail_pwd).
-
-  Second, use send_gmail(to, subject, text, attach) to send emails.
-""" - -import os -import traceback -import sys - -import smtplib -from email.MIMEMultipart import MIMEMultipart -from email.MIMEBase import MIMEBase -from email.MIMEText import MIMEText -from email import Encoders - -GMAIL_USER="" -GMAIL_PWD="" - -gmail_file_name = "/home/monzum/monitor_script/seattle_gmail_info" - -def init_gmail(gmail_user="", gmail_pwd="", gmail_user_shvarname="GMAIL_USER", gmail_pwd_shvarname="GMAIL_PWD"): - """ - - Sets up the global variables GMAIL_USER and GMAIL_PWD for use by send_gmail() - - - gmail_user (optional) : - gmail username to use - gmail_pwd (optional): - gmail password for gmail_user - gmail_user_shvarname (optional): - if gmail_user is "" then this specifies the shell - variable name to use for extracting the gmail username - gmail_pwd_shvarname (optional): - if gmail_pwd is "" then this specifies the shell - variable name to use for extracting the gmail password - - None - - - Sets GMAIL_USER and GMAIL_PWD global variables - - - (True, "") on success and (False, explanation) on failure, - where explanation is a string explaining what went wrong - """ - global GMAIL_USER - global GMAIL_PWD - - gmail_user_info = {} - - # Get full file path - file_path = os.path.join(os.getcwd(), gmail_file_name) - - if os.path.isfile(file_path): - gmail_file_object = open(file_path, 'r') - print 'read file ' + file_path - gmail_user_info = eval(gmail_file_object.read()) - - GMAIL_USER = gmail_user_info['GMAIL_USER'] - GMAIL_PWD = gmail_user_info['GMAIL_PWD'] - print 'loaded gmail info' - else: - return False, "Make sure the file '" + gmail_file_name + "' is in the current directory" - - return True, "" - - -def send_gmail(to, subject, text, attach): - """ - - Sends an email to 'to' with subject 'subject' with text 'test' - and attachment filename 'attach'. Uses the gmail account - specified by GMAIL_USER and GMAIL_PWD global variables. - - GMAIL_USER and GMAIL_PWD must be set up with init_gmail() - prior to calling this function. - - - to: - who to send the email to, an email address string - subject: - the string subject line of the email - text: - the string text body of the email - attach: - the filename to attach to the message - - - Not sure? - - - Sends an email through gmail to a recipient. - - - (True,"") on succes, (False,explanation) on failure, where - explanation contains the string explaining the failure - """ - if GMAIL_USER is "": - return False, "GMAIL_USER not set, did you run init_gmail()?" - if GMAIL_PWD is "": - return False, "GMAIL_PWD not set, did you run init_gmail()?" - - msg = MIMEMultipart() - msg['From'] = GMAIL_USER - msg['To'] = to - msg['Subject'] = subject - - msg.attach(MIMEText(text)) - - if attach != "": - part = MIMEBase('application', 'octet-stream') - part.set_payload(open(attach, 'rb').read()) - Encoders.encode_base64(part) - part.add_header('Content-Disposition', - 'attachment; filename="%s"' % os.path.basename(attach)) - msg.attach(part) - - mailServer = smtplib.SMTP("smtp.gmail.com", 587) - mailServer.ehlo() - mailServer.starttls() - mailServer.ehlo() - - try: - mailServer.login(GMAIL_USER, GMAIL_PWD) - except smtplib.SMTPAuthenticationError, (code,resp): - return False, str(code) + " " + str(resp) - - mailServer.sendmail(GMAIL_USER, to, msg.as_string()) - - # Should be mailServer.quit(), but that crashes... 
-  mailServer.close()
-  return True, ""
-
-if __name__ == "__main__":
-  if len(sys.argv) != 6 and len(sys.argv) != 7:
-    print "usage:", sys.argv[0], "[gmail_user] [gmail_pwd] [to] [subj] [body] [optional:attach]"
-    sys.exit(0)
-
-
-  gmail_user = sys.argv[1]
-  gmail_pwd = sys.argv[2]
-  to = sys.argv[3]
-  subj = sys.argv[4]
-  body = sys.argv[5]
-
-  if len(sys.argv) == 6:
-    attach = ""
-  else:
-    attach = sys.argv[6]
-
-  success, explain_str = init_gmail(gmail_user, gmail_pwd)
-  if not success:
-    print explain_str
-    sys.exit(0)
-
-  success, explain_str = send_gmail(to,subj,body,attach)
-  if not success:
-    print explain_str
-    sys.exit(0)
-
-  print "sent"
-
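Although this copy is deleted here, later patches in the series keep calling the same two-step API described in the docstring above. A minimal library-use sketch, assuming a readable seattle_gmail_info credentials file in the expected location and a hypothetical recipient address:

import send_gmail

# init_gmail() loads GMAIL_USER/GMAIL_PWD from the credentials file and
# returns (True, "") on success or (False, explanation) on failure.
success, explanation = send_gmail.init_gmail()
if not success:
  print explanation
else:
  # An empty 'attach' argument means no attachment.
  send_gmail.send_gmail("someone@example.com", "test subject", "test body", "")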
diff --git a/statusstorage.py b/statusstorage.py
deleted file mode 100755
index 55379a1..0000000
--- a/statusstorage.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""
-  Author: Justin Cappos
-
-  Start Date: 14 Sept 2008
-
-  Description:
-
-  This module stores status information about the sandbox. Use "read_status"
-  and "write_status" to set and check the status...
-
-  This module works by creating a file with a name that indicates the status.
-  The order of operations is: find old file name(s), write new file, delete
-  old file(s). File names contain a timestamp so that one can tell when it
-  was last updated. The actual format is: "prefix-status-timestamp".
-
-"""
-
-# to store the current time...
-import time
-
-# needed to get a lock
-import threading
-
-# needed for listdir...
-import os
-
-# To allow access to a real fileobject
-# call type...
-myfile = file
-
-statusfilenameprefix = None
-
-# This prevents writes to the nanny's status information after we want to stop
-statuslock = threading.Lock()
-
-def init(sfnp):
-  global statusfilenameprefix
-  statusfilenameprefix = sfnp
-
-
-# Write out a status that can be read by another process...
-def write_status(status, mystatusfilenameprefix=None):
-
-  if not mystatusfilenameprefix:
-    mystatusfilenameprefix = statusfilenameprefix
-
-  # nothing set, nothing to do...
-  if not mystatusfilenameprefix:
-    return
-
-  mystatusdir = os.path.dirname(mystatusfilenameprefix)
-  if mystatusdir == '':
-    mystatusdir = './'
-  else:
-    mystatusdir = mystatusdir+'/'
-
-  # BUG: Is getting a directory list atomic wrt file creation / deletion?
-  # get the current file list...
-  # Fix. Need to prepend the directory name we're writing into...
-  existingfiles = os.listdir(mystatusdir)
-
-  timestamp = time.time()
-
-  # write the file
-  myfile(mystatusfilenameprefix+"-"+status+"-"+str(timestamp),"w").close()
-
-  # remove the old files...
-  for filename in existingfiles:
-    if len(filename.split('-')) == 3 and filename.split('-')[0] == os.path.basename(mystatusfilenameprefix):
-      try:
-        os.remove(mystatusdir+filename)
-      except OSError, e:
-        if e[0] == 2:
-          # file not found, let's assume another instance removed it...
-          continue
-
-        # otherwise, let's re-raise the error
-        raise
-
-
-def read_status(mystatusfilenameprefix=None):
-
-  if not mystatusfilenameprefix:
-    mystatusfilenameprefix = statusfilenameprefix
-
-  # BUG: is getting a dir list atomic wrt file creation / deletion?
-  # get the current file list...
-  # Fix. Need to prepend the directory name we're writing into...
-  if os.path.dirname(mystatusfilenameprefix):
-    existingfiles = os.listdir(os.path.dirname(mystatusfilenameprefix))
-  else:
-    existingfiles = os.listdir('.')
-
-  latesttime = 0
-  lateststatus = None
-
-  # find the newest status update...
-  for filename in existingfiles:
-    if filename.split('-')[0] == mystatusfilenameprefix:
-      thisstatus = filename.split('-',2)[1]
-      thistime = float(filename.split('-',2)[2])
-
-      # is this the latest?
-      if thistime > latesttime:
-        latesttime = thistime
-        lateststatus = thisstatus
-
-  return (lateststatus, latesttime)
-
-
-
diff --git a/statusstorage.pyc b/statusstorage.pyc
deleted file mode 100644
index 45ae5c574aa9729abd47171c5889b66528a5889f..0000000000000000000000000000000000000000
GIT binary patch
[~2.4 KB of base85-encoded binary payload omitted: statusstorage.pyc is a
deleted compiled artifact and its byte stream is not human-readable]
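The deleted statusstorage module encodes state entirely in file names of the form "prefix-status-timestamp". A small sketch of the intended round trip, assuming the module is still importable and using a directory-less prefix (read_status, as written above, only matches plain prefixes reliably, per its own BUG comments):

import statusstorage

statusstorage.init("nodestate")             # status files will be named nodestate-...
statusstorage.write_status("Started")       # creates ./nodestate-Started-<timestamp>
status, when = statusstorage.read_status()  # newest (status, timestamp) pair
print status, when                          # e.g. Started 1412441446.3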
diff --git a/tracebackrepy.py b/tracebackrepy.py
deleted file mode 100755
index d2dd446..0000000
--- a/tracebackrepy.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""
-Author: Justin Cappos
-
-Start Date: September 17th, 2008
-
-Description:
-Module for printing clean tracebacks. It takes the python traceback and
-makes the output look nicer so the programmer can tell what is happening...
-
-"""
-
-
-# we'll print our own exceptions
-import traceback
-# This needs hasattr. I'll allow it...
-traceback.hasattr = hasattr
-
-# and don't want traceback to use linecache because linecache uses open
-import fakelinecache
-traceback.linecache = fakelinecache
-
-# Need to be able to reference the last traceback...
-import sys
-
-# Used to determine whether or not we use the service logger to log internal
-# errors. Defaults to false. -Brent
-servicelog = False
-
-# this is the directory where the node manager resides. We will use this
-# when deciding where to write our service log.
-logdirectory = None
-
-
-# We need the service logger to log internal errors -Brent
-import servicelogger
-
-# We need to be able to do a harshexit on internal errors
-import harshexit
-
-# I'd like to know if it's a "safety concern" so I can tell the user...
-# I'll import the module so I can check the exceptions
-import safety_exceptions
-
-# needed to get the PID
-import os
-
-# Armon: This set contains all the modules which are black-listed
-# from the traceback, so that if there is an exception, they will
-# not appear in the stack.
-TB_SKIP_MODULES = ["repy.py","safe.py","virtual_namespace.py","namespace.py","emulcomm.py",
-                   "emultimer.py","emulmisc.py","emulfile.py","nonportable.py","socket.py"]
-
-
-# sets the user's file name.
-# also sets whether or not the servicelogger is used. -Brent
-def initialize(useservlog=False, logdir = '.'):
-  global servicelog
-  global logdirectory
-  servicelog = useservlog
-  logdirectory = logdir
-
-
-# Public: this prints the previous exception in a readable way...
-def handle_exception():
-  """
-  This is an example traceback:
-  ---
-  Uncaught exception! Following is a full traceback, and a user traceback.
-  The user traceback excludes non-user modules. The most recent call is displayed last.
-
-  Full debugging traceback:
-    "repy.py", line 191, in main
-    "/Users/adadgar/Projects/seattle/trunk/test/virtual_namespace.py", line 116, in evaluate
-    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 304, in safe_run
-    "dylink.repy", line 472, in <module>
-    "dylink.repy", line 360, in dylink_dispatch
-    "dylink.repy", line 455, in evaluate
-    "/Users/adadgar/Projects/seattle/trunk/test/namespace.py", line 1072, in __do_func_call
-    "/Users/adadgar/Projects/seattle/trunk/test/namespace.py", line 1487, in wrapped_function
-    "/Users/adadgar/Projects/seattle/trunk/test/virtual_namespace.py", line 116, in evaluate
-    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 304, in safe_run
-    "testxmlrpc_common", line 254, in <module>
-    "/Users/adadgar/Projects/seattle/trunk/test/safe.py", line 174, in fnc
-
-  User traceback:
-    "dylink.repy", line 472, in <module>
-    "dylink.repy", line 360, in dylink_dispatch
-    "dylink.repy", line 455, in evaluate
-    "testxmlrpc_common", line 254, in <module>
-
-  Unsafe call: ('__import__',)
-  ---
-  """
-
-  # exc_info() gives the traceback (see the traceback module for info)
-  exceptiontype, exceptionvalue, exceptiontraceback = sys.exc_info()
-
-  # We store a full traceback, and a "filtered" user traceback to help the user
-  full_tb = ""
-  filtered_tb = ""
-
-  for tracebackentry in traceback.extract_tb(exceptiontraceback):
-    # the entry format is (filename, lineno, modulename, linedata)
-    # linedata is always empty because we prevent the linecache from working
-    # for safety reasons...
-
-    # Check that this module is not black-listed
-    module = tracebackentry[0]
-    skip = False
-
-    # Check if any of the forbidden modules are a substring of the module name
-    # e.g. if the name is /home/person/seattle/repy.py, we want to see that repy.py
-    # and skip this frame.
-    for forbidden in TB_SKIP_MODULES:
-      if forbidden in module:
-        skip = True
-        break
-
-    # Construct a frame of output
-    stack_frame = '  "'+tracebackentry[0]+'", line '+str(tracebackentry[1])+", in "+str(tracebackentry[2])+"\n"
-
-    # Always add to the full traceback
-    full_tb += stack_frame
-
-    # If this module is not blacklisted, add it to the filtered traceback
-    if not skip:
-      filtered_tb += stack_frame
-
-
-  # Print some general info
-  print >> sys.stderr, "---\nUncaught exception! Following is a full traceback, and a user traceback.\n" \
-                       "The user traceback excludes non-user modules. The most recent call is displayed last.\n"
-
-  # Print the full traceback first
-  print >> sys.stderr, "Full debugging traceback:\n",full_tb
-
-  print >> sys.stderr, "User traceback:\n",filtered_tb
-
-
-  # When I try to print an Exception object, I get:
-  # "<type 'exceptions.Exception'>". I'm going to look for this and produce
-  # more sensible output if it happens.
-
-  if exceptiontype is safety_exceptions.CheckNodeException:
-    print >> sys.stderr, "Unsafe call with line number / type:",str(exceptionvalue)
-
-  elif exceptiontype is safety_exceptions.CheckStrException:
-    print >> sys.stderr, "Unsafe string on line number / string:",exceptionvalue
-
-  elif exceptiontype is safety_exceptions.RunBuiltinException:
-    print >> sys.stderr, "Unsafe call:",exceptionvalue
-
-  elif str(exceptiontype)[0] == '<':
-    print >> sys.stderr, "Exception (with "+str(exceptiontype)[1:-1]+"):", exceptionvalue
-  else:
-    print >> sys.stderr, "Exception (with type "+str(exceptiontype)+"):", exceptionvalue
-
-  # Print another line so that the end of the output is clear
-  print >> sys.stderr, "---"
-
-
-def handle_internalerror(error_string, exitcode):
-  """
-  Brent Couvrette
-
-  When an internal error happens in repy it should be handled differently
-  than normal exceptions, because internal errors could possibly lead to
-  security vulnerabilities if we aren't careful. Therefore when an internal
-  error occurs, we will not return control to the user's program. Instead
-  we will log the error to the service log if available, then terminate.
-
-  error_string - The error string to be logged if logging is enabled.
-  exitcode - The exit code to be used in the harshexit call.
-
-  None
-
-  The program will exit.
-
-  Shouldn't return because harshexit will always be called.
-  """
-
-  try:
-    print >> sys.stderr, "Internal Error"
-    if not servicelog:
-      # If the service log is disabled, let's just exit.
-      harshexit.harshexit(exitcode)
-    else:
-      # Internal errors should not be given to the user's code to be caught,
-      # so we print the exception to the service log and exit. -Brent
-      exceptionstring = "[INTERNAL ERROR] " + error_string + '\n'
-      for line in traceback.format_stack():
-        exceptionstring = exceptionstring + line
-
-      # This magic is determining what directory we are in, so that can be
-      # used as an identifier in the log. In a standard deployment this
-      # should be of the form vXX where XX is the vessel number. We don't
-      # want any exceptions here preventing us from exiting, so we will
-      # wrap this in a try-except block, and use a default value if we fail.
-      try:
-        identifier = os.path.basename(os.getcwd())
-      except:
-        # We use a blank except because if we don't, the user might be able to
-        # handle the exception, which is unacceptable on internal errors. Using
-        # the current pid should avoid any attempts to write to the same file at
-        # the same time.
-        identifier = str(os.getpid())
-      else:
-        if identifier == '':
-          # If the identifier is blank, use the PID.
-          identifier = str(os.getpid())
-
-      # Again we want to ensure that even if we fail to log, we still exit.
-      try:
-        servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
-      except Exception, e:
-        # if an exception occurs, log it (unfortunately, to the user's log)
-        print 'Inner abort of servicelogger'
-        print e,type(e)
-        traceback.print_exc()
-      finally:
-        harshexit.harshexit(exitcode)
-
-  except Exception, e:
-    # if an exception occurs, log it (unfortunately, to the user's log)
-    print 'Outer abort of servicelogger'
-    print e,type(e)
-    traceback.print_exc()
-  finally:
-    harshexit.harshexit(842)
diff --git a/tracebackrepy.pyc b/tracebackrepy.pyc
deleted file mode 100644
index abe291ba1f45443a116194b11428c5458430ff3a..0000000000000000000000000000000000000000
GIT binary patch
[~5.6 KB of base85-encoded binary payload omitted: tracebackrepy.pyc is a
deleted compiled artifact and its byte stream is not human-readable]
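The frame-filtering idea in the deleted handle_exception() is reusable on its own: render each traceback entry, but drop frames whose file name contains a blacklisted module. A condensed sketch of the same technique, with a hypothetical two-entry blacklist:

import sys
import traceback

TB_SKIP_MODULES = ["repy.py", "safe.py"]  # hypothetical blacklist

def user_traceback():
  # Render the current exception's stack, skipping blacklisted frames.
  exctype, excvalue, exctb = sys.exc_info()
  frames = ""
  for filename, lineno, funcname, _ in traceback.extract_tb(exctb):
    if not any(forbidden in filename for forbidden in TB_SKIP_MODULES):
      frames += '  "%s", line %d, in %s\n' % (filename, lineno, funcname)
  return frames

try:
  1 / 0
except ZeroDivisionError:
  print >> sys.stderr, "User traceback:\n" + user_traceback()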
From: asm582
Date: Sun, 28 Sep 2014 14:27:06 -0400
Subject: [PATCH 22/33] Add socket timeout exception

---
 centralizedadvertise.repy | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy
index 0881b1e..296be61 100644
--- a/centralizedadvertise.repy
+++ b/centralizedadvertise.repy
@@ -23,6 +23,8 @@ def centralizedadvertise_announce(key, value, ttlval):
     response = session_recvmessage(sockobj)
     if response != 'OK':
       raise Exception, "Centralized announce failed '"+response+"'"
+  except socket.timeout:
+    print "Socket timed out '"+response+"'"
   finally:
     # BUG: This raises an error right now if the call times out ( #260 )
     # This isn't a big problem, but it is the "wrong" exception

From 72ceecfa857ff42c5a34f30e3aa8e6b7f9db14e9 Mon Sep 17 00:00:00 2001
From: asm582
Date: Sun, 28 Sep 2014 15:00:58 -0400
Subject: [PATCH 23/33] Add the proper exception type, as this is a repy file

---
 centralizedadvertise.repy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/centralizedadvertise.repy b/centralizedadvertise.repy
index 296be61..4cfb2db 100644
--- a/centralizedadvertise.repy
+++ b/centralizedadvertise.repy
@@ -23,7 +23,7 @@ def centralizedadvertise_announce(key, value, ttlval):
     response = session_recvmessage(sockobj)
     if response != 'OK':
       raise Exception, "Centralized announce failed '"+response+"'"
-  except socket.timeout:
+  except SocketTimeoutError:
     print "Socket timed out '"+response+"'"
   finally:
     # BUG: This raises an error right now if the call times out ( #260 )

From 5109e05108301cfc827fae7602cd7031fba3abe5 Mon Sep 17 00:00:00 2001
From: asm582
Date: Tue, 30 Sep 2014 18:15:46 -0400
Subject: [PATCH 24/33] Update monitor_processes.py

Add a new process to seattle_process_list
---
 monitor_processes.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/monitor_processes.py b/monitor_processes.py
index c3a3dec..7903032 100644
--- a/monitor_processes.py
+++ b/monitor_processes.py
@@ -109,7 +109,8 @@ def main():
   #integrationtestlib.notify_list.append("gppressi@gmail.com")

   #processes that should be running on seattle server
-  seattle_process_list=['advertiseserver.py']
+  #Add new process to monitor on the server
+  seattle_process_list=['advertiseserver.py','centralizedadvertise.r2py']

   #The commands that should be run on seattle to get all the required processes
   seattle_command = ["ps auwx | grep python | grep -v grep | grep geni | awk '{print $14}'"]
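The check PATCH 24 extends boils down to grepping one process listing for every critical process name. A rough sketch of that core loop, with a placeholder process list and a simplified pipeline:

import subprocess

CRITICAL_PROCESSES = ['advertiseserver.py']  # placeholder list

# Capture a process listing similar to the one monitor_processes.py greps.
listing = subprocess.Popen("ps auwx | grep python | grep -v grep",
    shell=True, stdout=subprocess.PIPE).communicate()[0]

for process_name in CRITICAL_PROCESSES:
  if process_name not in listing:
    print process_name + " is down"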
From 7b9200e4b6f0ccae820daef301dcfe6dc2906d81 Mon Sep 17 00:00:00 2001
From: asm582
Date: Sat, 4 Oct 2014 16:30:46 -0400
Subject: [PATCH 25/33] Add exception handling and clean the irc imports out of the code

---
 monitor_processes.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/monitor_processes.py b/monitor_processes.py
index 7903032..5a8df51 100644
--- a/monitor_processes.py
+++ b/monitor_processes.py
@@ -13,7 +13,6 @@
 import subprocess
 import sys
 import send_gmail
-import irc_seattlebot
 import integrationtestlib
 import traceback
 import time
@@ -66,7 +65,7 @@ def monitor_processes(monitor_process_list, command_list, machine_name):
     integrationtestlib.log("Checking process: "+critical_process+".......")
     if not critical_process in processes_string:
       critical_process_down=True
-      error_message = error_message+critical_process+" is down on "+machine_name+".cs.washington.edu\n"
+      error_message = error_message+critical_process+" is down on "+machine_name+".poly.edu\n"
       print "FAIL"
     else:
@@ -75,7 +74,6 @@ def monitor_processes(monitor_process_list, command_list, machine_name):
   if critical_process_down:
     integrationtestlib.notify(error_message, "Critical process down!")
-    irc_seattlebot.send_msg(error_message)
   else:
     integrationtestlib.log("All critical processes on "+machine_name+" are up and running")
@@ -110,7 +108,7 @@ def main():
   #integrationtestlib.notify_list.append("gppressi@gmail.com")
   #processes that should be running on seattle server
   #Add new process to monitor on the server
-  seattle_process_list=['advertiseserver.py','centralizedadvertise.r2py']
+  seattle_process_list=['advertiseserver.py']

   #The commands that should be run on seattle to get all the required processes
   seattle_command = ["ps auwx | grep python | grep -v grep | grep geni | awk '{print $14}'"]
@@ -125,10 +123,13 @@ def main():
   seattleclearinghouse_command.append("ps auwx | grep python | grep -v grep | grep justinc | awk '{print $12}'")

   #run monitor processes with the right command
-  if sys.argv[1] == '-seattle':
-    monitor_processes(seattle_process_list, seattle_command, "seattle")
-  elif sys.argv[1] == '-seattleclearinghouse':
-    monitor_processes(seattleclearinghouse_process_list, seattleclearinghouse_command, "seattleclearinghouse")
+  try:
+    if sys.argv[1] == '-seattle':
+      monitor_processes(seattle_process_list, seattle_command, "seattle")
+    elif sys.argv[1] == '-seattleclearinghouse':
+      monitor_processes(seattleclearinghouse_process_list, seattleclearinghouse_command, "seattleclearinghouse")
+  except IndexError:
+    print "usage: specify the server to check [-seattle or -seattleclearinghouse]"

From 7b3075c67266b3bcf285fe5c52ddd685f2f079b9 Mon Sep 17 00:00:00 2001
From: asm582
Date: Sat, 4 Oct 2014 16:36:47 -0400
Subject: [PATCH 26/33] Cleaned irc import code

---
 monitor_disk.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/monitor_disk.py b/monitor_disk.py
index 3eed854..88cd065 100644
--- a/monitor_disk.py
+++ b/monitor_disk.py
@@ -15,7 +15,6 @@
 import socket
 import subprocess
 import send_gmail
-import irc_seattlebot
 import integrationtestlib


@@ -23,7 +22,7 @@ def main():
   success,explanation_str = send_gmail.init_gmail()

-  #integrationtestlib.notify_list=['monzum@gmail.com']
+  integrationtestlib.notify_list=['asm582@gmail.com','abhishekmalvankar9@gmail.com']
   if not success:
     integrationtestlib.log(explanation_str)
     sys.exit(0)
@@ -41,6 +40,7 @@ def main():
   disk_free_fd.close()

   hostname = socket.gethostname() + ".poly.edu"
+  subject = "High disk usage"

   if disk_use_percent >= 95:
     message += "Disk space free: %s" % free_space
     integrationtestlib.log(message)
     integrationtestlib.notify(message, subject)
-    irc_seattlebot.send_msg(message)
   elif disk_use_percent > 90:
     message = "WARNING: High disk usage on %s: %s percent used.\n" % ( hostname, disk_use_percent)

From 3f29f67e62ff34190486081e1a019e6da8e2819a Mon Sep 17 00:00:00 2001
From: asm582
Date: Sat, 4 Oct 2014 16:39:55 -0400
Subject: [PATCH 27/33] Add a file-not-found exception to integrationtestlib.py

---
 integrationtestlib.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/integrationtestlib.py b/integrationtestlib.py
index c19cfb0..762c530 100644
--- a/integrationtestlib.py
+++ b/integrationtestlib.py
@@ -78,12 +78,16 @@ def notify(text, subject):

   #This will loop through a file containing emails that need to be notified and create a list out of them
   notify_list = []
-  email_file = open("email_address_list_file", "r")
-  email_list = email_file.readlines()
-  email_file.close()
+  try:
+    email_file = open("email_address_list_file", "r")
+    email_list = email_file.readlines()
+    email_file.close()
+  except IOError:
+    sys.exit("Aborting: email_address_list_file has not been created in the required directory")
   for email_address in email_list:
     email_address = email_address.rstrip("\r\n")
-    notify_list.append(email_address)
+    if email_address:
+      notify_list.append(email_address)
     log("notifying " + email_address)
     send_gmail.send_gmail(email_address, subject, text, "")
   return

From fff865bdd15b0a97e9a1f34050f545f3e8b8036a Mon Sep 17 00:00:00 2001
From: asm582
Date: Sat, 4 Oct 2014 16:42:38 -0400
Subject: [PATCH 28/33] Change gmail_file_name so the gmail username and password are read from the local system path

---
 send_gmail.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/send_gmail.py b/send_gmail.py
index 60ed6e7..df82564 100644
--- a/send_gmail.py
+++ b/send_gmail.py
@@ -40,7 +40,7 @@
 GMAIL_USER=""
 GMAIL_PWD=""

-gmail_file_name = "/home/abhishek/changes_monitor_script/monitor_script/seattle_gmail_info"
+gmail_file_name = "/home/abhishek/monitor_script/seattle_gmail_info"

 def init_gmail(gmail_user="", gmail_pwd="", gmail_user_shvarname="GMAIL_USER", gmail_pwd_shvarname="GMAIL_PWD"):
   """

From d0dc94bcfbdd1ace1002e03e8babf4f66751f309 Mon Sep 17 00:00:00 2001
From: asm582
Date: Sun, 12 Oct 2014 20:32:32 -0400
Subject: [PATCH 29/33] Create ut_seash_send_gmail.py

---
 ut_seash_send_gmail.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
 create mode 100644 ut_seash_send_gmail.py

diff --git a/ut_seash_send_gmail.py b/ut_seash_send_gmail.py
new file mode 100644
index 0000000..36b00b3
--- /dev/null
+++ b/ut_seash_send_gmail.py
@@ -0,0 +1,16 @@
+import sys
+import subprocess
+import send_gmail
+#pragma out read file C:\Users\abhishek\monitor_script\seattle_gmail_info
+#pragma out loaded gmail info
+#pragma out (True, '')
+
+
+def main():
+  gmail_file_name = "/home/abhishek/monitor_script/seattle_gmail_info"
+  result = send_gmail.init_gmail('test21119@gmail.com','testmail')
+  print result
+
+
+if __name__ == "__main__":
+  main()
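The #pragma out lines in the new test file are directives for Seattle's unit test framework: each one declares output the test's stdout is expected to contain, so the test fails when the declared lines and the actual prints disagree (which is exactly what PATCH 30 and PATCH 33 go on to reconcile). A minimal hypothetical test file in the same convention:

# ut_example_greeting.py -- hypothetical test illustrating #pragma out
#pragma out hello from the monitor

def main():
  # This print must match the #pragma out line above for the test to pass.
  print "hello from the monitor"

if __name__ == "__main__":
  main()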
From ebd6bd592dc8452ca06aea214612441639057fe0 Mon Sep 17 00:00:00 2001
From: asm582
Date: Sun, 12 Oct 2014 20:43:48 -0400
Subject: [PATCH 30/33] Update ut_seash_send_gmail.py

---
 ut_seash_send_gmail.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ut_seash_send_gmail.py b/ut_seash_send_gmail.py
index 36b00b3..924519e 100644
--- a/ut_seash_send_gmail.py
+++ b/ut_seash_send_gmail.py
@@ -9,7 +9,7 @@ def main():
   gmail_file_name = "/home/abhishek/monitor_script/seattle_gmail_info"
   result = send_gmail.init_gmail('test21119@gmail.com','testmail')
-  print result
+


 if __name__ == "__main__":
   main()

From a1261d28485e6ffdd8e596aad0d89aac60febbbd Mon Sep 17 00:00:00 2001
From: asm582
Date: Fri, 17 Oct 2014 15:50:49 -0400
Subject: [PATCH 31/33] Added check_ip_address.r2py to validate IP address

---
 check_ip_address.r2py | 66 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 check_ip_address.r2py

diff --git a/check_ip_address.r2py b/check_ip_address.r2py
new file mode 100644
index 0000000..8b3a5fe
--- /dev/null
+++ b/check_ip_address.r2py
@@ -0,0 +1,66 @@
+def _is_valid_ip_address(ipaddr):
+  """
+  Determines if ipaddr is a valid IP address.
+  0.X and 224-255.X addresses are not allowed.
+  Additionally, 192.168.0.0 is not allowed.
+
+  ipaddr: String to check for validity. (It will check that this is a string.)
+
+  True if a valid IP, False otherwise.
+  """
+  # Argument must be of the string type
+  if not type(ipaddr) == str:
+    return False
+
+  if ipaddr == '192.168.0.0':
+    log("IP 192.168.0.0 is not allowed to be used\n")
+    return False
+
+  # A valid IP should have 4 segments, explode on the period
+  octets = ipaddr.split(".")
+  while '' in octets:
+    octets.remove('')
+  # Check that we have 4 parts
+  if len(octets) != 4:
+    log("IP address should be of the four-octet format X.X.X.X\n")
+    return False
+
+  # Check that each segment is a number between 0 and 255 inclusively.
+  for octet in octets:
+    # Attempt to convert to an integer
+    try:
+      ipnumber = int(octet)
+    except ValueError:
+      # There was an error converting to an integer, not an IP
+      return False
+
+    # IP address octets must be between 0 and 255
+    if not (ipnumber >= 0 and ipnumber <= 255):
+      return False
+
+  # should not have a ValueError (I already checked)
+  firstipnumber = int(octets[0])
+
+  # IP addresses with the first octet 0 refer to all local IPs. These are
+  # not allowed
+  if firstipnumber == 0:
+    return False
+
+  # IP addresses with the first octet >=224 are either Multicast or reserved.
+  # These are not allowed
+  if firstipnumber >= 224:
+    return False
+
+  # At this point, assume the IP is valid
+  return True
+
+if callfunc == 'initialize':
+  if len(callargs) > 1:
+    raise Exception("Too many call arguments")
+  elif len(callargs) == 1:
+    ipaddr = callargs[0]
+    _is_valid_ip_address(ipaddr)
+

From 67d3ffea6b9ea794121c5399e8222526456fdc26 Mon Sep 17 00:00:00 2001
From: asm582
Date: Fri, 17 Oct 2014 17:18:54 -0400
Subject: [PATCH 32/33] Added ut_monitor_send_gmail.py for unit testing

---
 test/ut_monitor_send_gmail.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
 create mode 100644 test/ut_monitor_send_gmail.py

diff --git a/test/ut_monitor_send_gmail.py b/test/ut_monitor_send_gmail.py
new file mode 100644
index 0000000..ba2c2b7
--- /dev/null
+++ b/test/ut_monitor_send_gmail.py
@@ -0,0 +1,16 @@
+import sys
+import subprocess
+import send_gmail
+#pragma out read file C:\Users\abhishek\monitor_script\seattle_gmail_info
+#pragma out loaded gmail info
+#pragma out (True, '')
+
+
+def main():
+  gmail_file_name = "/home/abhishek/monitor_script/seattle_gmail_info"
+  result = send_gmail.init_gmail('test21119@gmail.com','testmail')
+
+
+if __name__ == "__main__":
+  main()
+

From 9a165d167e89dac01ce0585af035317d9df4a908 Mon Sep 17 00:00:00 2001
From: asm582
Date: Fri, 17 Oct 2014 17:36:23 -0400
Subject: [PATCH 33/33] Update ut_monitor_send_gmail.py

---
 test/ut_monitor_send_gmail.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/test/ut_monitor_send_gmail.py b/test/ut_monitor_send_gmail.py
index ba2c2b7..a9e7818 100644
--- a/test/ut_monitor_send_gmail.py
+++ b/test/ut_monitor_send_gmail.py
@@ -1,9 +1,8 @@
 import sys
 import subprocess
 import send_gmail
-#pragma out read file C:\Users\abhishek\monitor_script\seattle_gmail_info
+
 #pragma out loaded gmail info
-#pragma out (True, '')


 def main():