diff --git a/COPYING.txt b/COPYING.txt index 31469485007..898b2b74a7d 100644 --- a/COPYING.txt +++ b/COPYING.txt @@ -5,9 +5,9 @@ redistribute, modify and distribute modified versions." ------------------ Cython, which derives from Pyrex, is licensed under the Python -Software Foundation License. More precisely, all modifications +Software Foundation License. More precisely, all modifications made to go from Pyrex to Cython are so licensed. See LICENSE.txt for more details. - + diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py index e4cd4fa80d8..05c5035ff97 100644 --- a/Cython/Build/Dependencies.py +++ b/Cython/Build/Dependencies.py @@ -36,7 +36,7 @@ def unquote(literal): return literals[literal[1:-1]] else: return literal - + return [unquote(item) for item in s.split(delimiter)] transitive_str = object() @@ -70,7 +70,7 @@ def line_iter(source): start = end+1 class DistutilsInfo(object): - + def __init__(self, source=None, exn=None): self.values = {} if source is not None: @@ -97,7 +97,7 @@ def __init__(self, source=None, exn=None): value = getattr(exn, key, None) if value: self.values[key] = value - + def merge(self, other): if other is None: return self @@ -114,7 +114,7 @@ def merge(self, other): else: self.values[key] = value return self - + def subs(self, aliases): if aliases is None: return self @@ -140,9 +140,9 @@ def subs(self, aliases): def strip_string_literals(code, prefix='__Pyx_L'): """ - Normalizes every string literal to be of the form '__Pyx_Lxxx', + Normalizes every string literal to be of the form '__Pyx_Lxxx', returning the normalized code and a mapping of labels to - string literals. + string literals. """ new_code = [] literals = {} @@ -156,7 +156,7 @@ def strip_string_literals(code, prefix='__Pyx_L'): double_q = code.find('"', q) q = min(single_q, double_q) if q == -1: q = max(single_q, double_q) - + # We're done. if q == -1 and hash_mark == -1: new_code.append(code[start:]) @@ -181,7 +181,7 @@ def strip_string_literals(code, prefix='__Pyx_L'): start = q else: q += 1 - + # Process comment. 
elif -1 != hash_mark and (hash_mark < q or q == -1): end = code.find('\n', hash_mark) @@ -212,7 +212,7 @@ def strip_string_literals(code, prefix='__Pyx_L'): new_code.append(code[start:end]) start = q q += len(in_quote) - + return "".join(new_code), literals @@ -245,16 +245,16 @@ def parse_dependencies(source_filename): class DependencyTree(object): - + def __init__(self, context): self.context = context self._transitive_cache = {} - + #@cached_method def parse_dependencies(self, source_filename): return parse_dependencies(source_filename) parse_dependencies = cached_method(parse_dependencies) - + #@cached_method def cimports_and_externs(self, filename): cimports, includes, externs = self.parse_dependencies(filename)[:3] @@ -272,10 +272,10 @@ def cimports_and_externs(self, filename): print("Unable to locate '%s' referenced from '%s'" % (filename, include)) return tuple(cimports), tuple(externs) cimports_and_externs = cached_method(cimports_and_externs) - + def cimports(self, filename): return self.cimports_and_externs(filename)[0] - + #@cached_method def package(self, filename): dir = os.path.dirname(filename) @@ -284,13 +284,13 @@ def package(self, filename): else: return () package = cached_method(package) - + #@cached_method def fully_qualifeid_name(self, filename): module = os.path.splitext(os.path.basename(filename))[0] return '.'.join(self.package(filename) + (module,)) fully_qualifeid_name = cached_method(fully_qualifeid_name) - + def find_pxd(self, module, filename=None): if module[0] == '.': raise NotImplementedError("New relative imports.") @@ -301,7 +301,7 @@ def find_pxd(self, module, filename=None): return pxd return self.context.find_pxd_file(module, None) find_pxd = cached_method(find_pxd) - + #@cached_method def cimported_files(self, filename): if filename[-4:] == '.pyx' and os.path.exists(filename[:-4] + '.pxd'): @@ -316,33 +316,33 @@ def cimported_files(self, filename): print("\n\t".join(b)) return tuple(self_pxd + filter(None, [self.find_pxd(m, filename) for m in self.cimports(filename)])) cimported_files = cached_method(cimported_files) - + def immediate_dependencies(self, filename): all = list(self.cimported_files(filename)) for extern in sum(self.cimports_and_externs(filename), ()): all.append(os.path.normpath(os.path.join(os.path.dirname(filename), extern))) return tuple(all) - + #@cached_method def timestamp(self, filename): return os.path.getmtime(filename) timestamp = cached_method(timestamp) - + def extract_timestamp(self, filename): # TODO: .h files from extern blocks return self.timestamp(filename), filename - + def newest_dependency(self, filename): return self.transitive_merge(filename, self.extract_timestamp, max) - + def distutils_info0(self, filename): return self.parse_dependencies(filename)[3] - + def distutils_info(self, filename, aliases=None, base=None): return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge) .subs(aliases) .merge(base)) - + def transitive_merge(self, node, extract, merge): try: seen = self._transitive_cache[extract, merge] @@ -350,7 +350,7 @@ def transitive_merge(self, node, extract, merge): seen = self._transitive_cache[extract, merge] = {} return self.transitive_merge_helper( node, extract, merge, seen, {}, self.cimported_files)[0] - + def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing): if node in seen: return seen[node], None diff --git a/Cython/Build/Inline.py b/Cython/Build/Inline.py index e72e50953e5..eec5401eac5 100644 --- a/Cython/Build/Inline.py +++ 
b/Cython/Build/Inline.py @@ -88,7 +88,7 @@ def safe_type(arg, context=None): return '%s.%s' % (base_type.__module__, base_type.__name__) return 'object' -def cython_inline(code, +def cython_inline(code, get_type=unsafe_type, lib_dir=os.path.expanduser('~/.cython/inline'), cython_include_dirs=['.'], @@ -252,14 +252,14 @@ def get_body(source): else: return source[ix+1:] -# Lots to be done here... It would be especially cool if compiled functions +# Lots to be done here... It would be especially cool if compiled functions # could invoke each other quickly. class RuntimeCompiledFunction(object): def __init__(self, f): self._f = f self._body = get_body(inspect.getsource(f)) - + def __call__(self, *args, **kwds): all = getcallargs(self._f, *args, **kwds) return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all) diff --git a/Cython/Build/Tests/TestInline.py b/Cython/Build/Tests/TestInline.py index 4a6376934bc..b9ffade83d7 100644 --- a/Cython/Build/Tests/TestInline.py +++ b/Cython/Build/Tests/TestInline.py @@ -32,7 +32,7 @@ def test_globals(self): self.assertEquals(inline("return global_value + 1", **test_kwds), global_value + 1) if has_numpy: - + def test_numpy(self): import numpy a = numpy.ndarray((10, 20)) diff --git a/Cython/CodeWriter.py b/Cython/CodeWriter.py index 0be3881729b..da6a03c7f62 100644 --- a/Cython/CodeWriter.py +++ b/Cython/CodeWriter.py @@ -14,14 +14,14 @@ class LinesResult(object): def __init__(self): self.lines = [] self.s = u"" - + def put(self, s): self.s += s - + def newline(self): self.lines.append(self.s) self.s = u"" - + def putline(self, s): self.put(s) self.newline() @@ -29,7 +29,7 @@ def putline(self, s): class CodeWriter(TreeVisitor): indent_string = u" " - + def __init__(self, result = None): super(CodeWriter, self).__init__() if result is None: @@ -38,22 +38,22 @@ def __init__(self, result = None): self.numindents = 0 self.tempnames = {} self.tempblockindex = 0 - + def write(self, tree): self.visit(tree) - + def indent(self): self.numindents += 1 - + def dedent(self): self.numindents -= 1 - + def startline(self, s = u""): self.result.put(self.indent_string * self.numindents + s) - + def put(self, s): self.result.put(s) - + def endline(self, s = u""): self.result.putline(s) @@ -70,13 +70,13 @@ def comma_separated_list(self, items, output_rhs=False): self.visit(item.default) self.put(u", ") self.visit(items[-1]) - + def visit_Node(self, node): raise AssertionError("Node not handled by serializer: %r" % node) - + def visit_ModuleNode(self, node): self.visitchildren(node) - + def visit_StatListNode(self, node): self.visitchildren(node) @@ -87,7 +87,7 @@ def visit_FuncDefNode(self, node): self.indent() self.visit(node.body) self.dedent() - + def visit_CArgDeclNode(self, node): if node.base_type.name is not None: self.visit(node.base_type) @@ -96,10 +96,10 @@ def visit_CArgDeclNode(self, node): if node.default is not None: self.put(u" = ") self.visit(node.default) - + def visit_CNameDeclaratorNode(self, node): self.put(node.name) - + def visit_CSimpleBaseTypeNode(self, node): # See Parsing.p_sign_and_longness if node.is_basic_c_type: @@ -108,16 +108,16 @@ def visit_CSimpleBaseTypeNode(self, node): self.put("short " * -node.longness) elif node.longness > 0: self.put("long " * node.longness) - + self.put(node.name) - + def visit_SingleAssignmentNode(self, node): self.startline() self.visit(node.lhs) self.put(u" = ") self.visit(node.rhs) self.endline() - + def visit_CascadedAssignmentNode(self, node): self.startline() for lhs in 
node.lhs_list: @@ -125,10 +125,10 @@ def visit_CascadedAssignmentNode(self, node): self.put(u" = ") self.visit(node.rhs) self.endline() - + def visit_NameNode(self, node): self.put(node.name) - + def visit_IntNode(self, node): self.put(node.value) @@ -164,7 +164,7 @@ def visit_IfStatNode(self, node): def visit_PassStatNode(self, node): self.startline(u"pass") self.endline() - + def visit_PrintStatNode(self, node): self.startline(u"print ") self.comma_separated_list(node.arg_tuple.args) @@ -176,7 +176,7 @@ def visit_BinopNode(self, node): self.visit(node.operand1) self.put(u" %s " % node.operator) self.visit(node.operand2) - + def visit_CVarDefNode(self, node): self.startline(u"cdef ") self.visit(node.base_type) @@ -201,7 +201,7 @@ def visit_ForInStatNode(self, node): def visit_SequenceNode(self, node): self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm... - + def visit_SimpleCallNode(self, node): self.visit(node.function) self.put(u"(") @@ -224,14 +224,14 @@ def visit_ExprStatNode(self, node): self.startline() self.visit(node.expr) self.endline() - + def visit_InPlaceAssignmentNode(self, node): self.startline() self.visit(node.lhs) self.put(u" %s= " % node.operator) self.visit(node.rhs) self.endline() - + def visit_WithStatNode(self, node): self.startline() self.put(u"with ") @@ -243,7 +243,7 @@ def visit_WithStatNode(self, node): self.indent() self.visit(node.body) self.dedent() - + def visit_AttributeNode(self, node): self.visit(node.obj) self.put(u".%s" % node.attribute) diff --git a/Cython/Compiler/AnalysedTreeTransforms.py b/Cython/Compiler/AnalysedTreeTransforms.py index 79a0485e282..de527e00b5a 100644 --- a/Cython/Compiler/AnalysedTreeTransforms.py +++ b/Cython/Compiler/AnalysedTreeTransforms.py @@ -11,7 +11,7 @@ class AutoTestDictTransform(ScopeTrackingTransform): # Handles autotestdict directive - blacklist = ['__cinit__', '__dealloc__', '__richcmp__', + blacklist = ['__cinit__', '__dealloc__', '__richcmp__', '__nonzero__', '__bool__', '__len__', '__contains__'] diff --git a/Cython/Compiler/Annotate.py b/Cython/Compiler/Annotate.py index 425d956e89e..fe38378b37a 100644 --- a/Cython/Compiler/Annotate.py +++ b/Cython/Compiler/Annotate.py @@ -12,9 +12,9 @@ # need one-characters subsitutions (for now) so offsets aren't off special_chars = [(u'<', u'\xF0', u'<'), - (u'>', u'\xF1', u'>'), + (u'>', u'\xF1', u'>'), (u'&', u'\xF2', u'&')] - + line_pos_comment = re.compile(r'/\*.*?<<<<<<<<<<<<<<.*?\*/\n*', re.DOTALL) class AnnotationCCodeWriter(CCodeWriter): @@ -32,14 +32,14 @@ def __init__(self, create_from=None, buffer=None, copy_formatting=True): self.annotations = create_from.annotations self.code = create_from.code self.last_pos = create_from.last_pos - + def create_new(self, create_from, buffer, copy_formatting): return AnnotationCCodeWriter(create_from, buffer, copy_formatting) def write(self, s): CCodeWriter.write(self, s) self.annotation_buffer.write(s) - + def mark_pos(self, pos): if pos is not None: CCodeWriter.mark_pos(self, pos) @@ -52,7 +52,7 @@ def mark_pos(self, pos): def annotate(self, pos, item): self.annotations.append((pos, item)) - + def save_annotation(self, source_filename, target_filename): self.mark_pos(None) f = Utils.open_source_file(source_filename) @@ -74,7 +74,7 @@ def save_annotation(self, source_filename, target_filename): all.append(((source_filename, pos[1], pos[2]+size), end)) else: all.append((pos, start+end)) - + all.sort() all.reverse() for pos, item in all: @@ -83,7 +83,7 @@ def save_annotation(self, 
source_filename, target_filename): col += 1 line = lines[line_no] lines[line_no] = line[:col] + item + line[col:] - + html_filename = os.path.splitext(target_filename)[0] + ".html" f = codecs.open(html_filename, "w", encoding="UTF-8") f.write(u'\n') @@ -130,14 +130,14 @@ def save_annotation(self, source_filename, target_filename): c_file = Utils.decode_filename(os.path.basename(target_filename)) f.write(u'
Raw output: <a href="%s">%s</a>\n' % (c_file, c_file))
         k = 0
-
+
         py_c_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]+)\(')
         py_marco_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][A-Z_]+)\(')
         pyx_c_api = re.compile(u'(__Pyx_[A-Z][a-z_][A-Za-z_]+)\(')
         pyx_macro_api = re.compile(u'(__Pyx_[A-Z][A-Z_]+)\(')
         error_goto = re.compile(ur'((; *if .*)? \{__pyx_filename = .*goto __pyx_L\w+;\})')
         refnanny = re.compile(u'(__Pyx_X?(GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)')
-
+
         code_source_file = self.code[source_filename]
         for line in lines:
@@ -146,18 +146,18 @@ def save_annotation(self, source_filename, target_filename):
                 code = code_source_file[k]
             except KeyError:
                 code = ''
-
+
             code = code.replace('<', '&lt;')
-
+
             code, py_c_api_calls = py_c_api.subn(ur"\1(", code)
             code, pyx_c_api_calls = pyx_c_api.subn(ur"\1(", code)
             code, py_macro_api_calls = py_marco_api.subn(ur"\1(", code)
             code, pyx_macro_api_calls = pyx_macro_api.subn(ur"\1(", code)
             code, refnanny_calls = refnanny.subn(ur"\1", code)
             code, error_goto_calls = error_goto.subn(ur"\1", code)
-
+
             code = code.replace(u";", u";")
-
+
             score = 5*py_c_api_calls + 2*pyx_c_api_calls + py_macro_api_calls + pyx_macro_api_calls - refnanny_calls
             color = u"FFFF%02x" % int(255/(1+score/10.0))
             f.write(u"" % (color, k))
@@ -166,13 +166,13 @@ def save_annotation(self, source_filename, target_filename):
             for c, cc, html in special_chars:
                 line = line.replace(cc, html)
             f.write(line.rstrip())
-                
+
             f.write(u'\n')
             code = re.sub(line_pos_comment, '', code) # inline annotations are redundant
             f.write(u"%s
" % (k, color, code)) f.write(u'\n') f.close() - + # TODO: make this cleaner def escape(raw_string): @@ -184,15 +184,15 @@ def escape(raw_string): class AnnotationItem(object): - + def __init__(self, style, text, tag="", size=0): self.style = style self.text = text self.tag = tag self.size = size - + def start(self): return u"%s" % (self.style, self.text, self.tag) - + def end(self): return self.size, u"" diff --git a/Cython/Compiler/AutoDocTransforms.py b/Cython/Compiler/AutoDocTransforms.py index 13859eac6be..0fcdd0e963c 100644 --- a/Cython/Compiler/AutoDocTransforms.py +++ b/Cython/Compiler/AutoDocTransforms.py @@ -101,7 +101,7 @@ def __call__(self, node): return node else: return super(EmbedSignature, self).__call__(node) - + def visit_ClassDefNode(self, node): oldname = self.class_name oldclass = self.class_node @@ -120,7 +120,7 @@ def visit_ClassDefNode(self, node): def visit_DefNode(self, node): if not self.current_directives['embedsignature']: return node - + is_constructor = False hide_self = False if node.entry.is_special: diff --git a/Cython/Compiler/Buffer.py b/Cython/Compiler/Buffer.py index a7f4c3d3b3d..3584234cf82 100644 --- a/Cython/Compiler/Buffer.py +++ b/Cython/Compiler/Buffer.py @@ -57,12 +57,12 @@ def handle_scope(self, node, scope): if isinstance(node, ModuleNode) and len(bufvars) > 0: - # for now...note that pos is wrong + # for now...note that pos is wrong raise CompileError(node.pos, "Buffer vars not allowed in module scope") for entry in bufvars: if entry.type.dtype.is_ptr: raise CompileError(node.pos, "Buffers with pointer types not yet supported.") - + name = entry.name buftype = entry.type if buftype.ndim > self.max_ndim: @@ -84,10 +84,10 @@ def var(prefix, idx, initval): if entry.is_arg: result.used = True return result - + stridevars = [var(Naming.bufstride_prefix, i, "0") for i in range(entry.type.ndim)] - shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)] + shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)] mode = entry.type.mode if mode == 'full': suboffsetvars = [var(Naming.bufsuboffset_prefix, i, "-1") for i in range(entry.type.ndim)] @@ -95,7 +95,7 @@ def var(prefix, idx, initval): suboffsetvars = None entry.buffer_aux = Symtab.BufferAux(bufinfo, stridevars, shapevars, suboffsetvars) - + scope.buffer_entries = bufvars self.scope = scope @@ -138,9 +138,9 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee """ if defaults is None: defaults = buffer_defaults - + posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env, type_args = (0,'dtype')) - + if len(posargs) > buffer_positional_options_count: raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY) @@ -187,7 +187,7 @@ def assert_bool(name): assert_bool('cast') return options - + # # Code generation @@ -209,7 +209,7 @@ def get_flags(buffer_aux, buffer_type): assert False if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE" return flags - + def used_buffer_aux_vars(entry): buffer_aux = entry.buffer_aux buffer_aux.buffer_info_var.used = True @@ -258,10 +258,10 @@ def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type): bufstruct = buffer_aux.buffer_info_var.cname dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype) - + return ("__Pyx_GetBufferAndValidate(&%(bufstruct)s, " "(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, " - "%(cast)d, __pyx_stack)" % locals()) + "%(cast)d, __pyx_stack)" % locals()) def 
put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type, is_initialized, pos, code): @@ -272,7 +272,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type, However, the assignment operation may throw an exception so that the reassignment never happens. - + Depending on the circumstances there are two possible outcomes: - Old buffer released, new acquired, rhs assigned to lhs - Old buffer released, new acquired which fails, reaqcuire old lhs buffer @@ -285,7 +285,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type, code.putln("{") # Set up necesarry stack for getbuffer code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth()) - + getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below if is_initialized: @@ -370,7 +370,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos, code.putln("%s = %d;" % (tmp_cname, dim)) code.put("} else ") # check bounds in positive direction - if signed != 0: + if signed != 0: cast = "" else: cast = "(size_t)" @@ -389,7 +389,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos, bufaux.shapevars): if signed != 0: code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape.cname)) - + # Create buffer lookup and return it # This is done via utility macros/inline functions, which vary # according to the access mode used. @@ -418,7 +418,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos, for i, s in zip(index_cnames, bufaux.stridevars): params.append(i) params.append(s.cname) - + # Make sure the utility code is available if funcname not in code.globalstate.utility_codes: code.globalstate.utility_codes.add(funcname) @@ -458,7 +458,7 @@ def buf_lookup_full_code(proto, defin, name, nd): char* ptr = (char*)buf; """) % (name, funcargs) + "".join([dedent("""\ ptr += s%d * i%d; - if (o%d >= 0) ptr = *((char**)ptr) + o%d; + if (o%d >= 0) ptr = *((char**)ptr) + o%d; """) % (i, i, i, i) for i in range(nd)] ) + "\nreturn ptr;\n}") @@ -563,7 +563,7 @@ def find_buffer_types(scope): #endif """) - + env.use_utility_code(UtilityCode( proto = dedent("""\ #if PY_MAJOR_VERSION < 3 @@ -613,9 +613,9 @@ def get_type_information_cname(code, dtype, maxdepth=None): if name not in code.globalstate.utility_codes: code.globalstate.utility_codes.add(name) typecode = code.globalstate['typeinfo'] - + complex_possible = dtype.is_struct_or_union and dtype.can_be_complex() - + declcode = dtype.declaration_code("") if dtype.is_simple_buffer_dtype(): structinfo_name = "NULL" @@ -634,7 +634,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): typecode.putln("};", safe=True) else: assert False - + rep = str(dtype) if dtype.is_int: if dtype.signed == 0: @@ -851,7 +851,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; - } + } } } @@ -895,7 +895,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; - } + } } } @@ -932,7 +932,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; - + if (ctx->packmode == '@' || ctx->packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { @@ -955,7 +955,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): ctx->head->parent_offset = parent_offset; 
continue; } - + __Pyx_BufFmt_RaiseExpected(ctx); return -1; } @@ -969,7 +969,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): } ctx->fmt_offset += size; - + --ctx->enc_count; /* Consume from buffer string */ /* Done checking, move to next field, pushing or popping struct stack if needed */ @@ -1002,7 +1002,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; - return 0; + return 0; } static int __Pyx_BufFmt_FirstPack(__Pyx_BufFmt_Context* ctx) { @@ -1124,7 +1124,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): return NULL; } } - + } } } diff --git a/Cython/Compiler/Builtin.py b/Cython/Compiler/Builtin.py index 196a2094a09..296f1cd3d1e 100644 --- a/Cython/Compiler/Builtin.py +++ b/Cython/Compiler/Builtin.py @@ -438,14 +438,14 @@ def declare_in_type(self, self_type): ("type", "PyType_Type", []), # This conflicts with the C++ bool type, and unfortunately -# C++ is too liberal about PyObject* <-> bool conversions, +# C++ is too liberal about PyObject* <-> bool conversions, # resulting in unintuitive runtime behavior and segfaults. # ("bool", "PyBool_Type", []), ("int", "PyInt_Type", []), ("long", "PyLong_Type", []), ("float", "PyFloat_Type", []), - + ("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'), BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type), BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type), @@ -474,7 +474,7 @@ def declare_in_type(self, self_type): ]), # ("file", "PyFile_Type", []), # not in Py3 - ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"), + ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"), BuiltinMethod("discard", "TO", "i", "PySet_Discard"), BuiltinMethod("add", "TO", "i", "PySet_Add"), BuiltinMethod("pop", "T", "O", "PySet_Pop")]), @@ -490,7 +490,7 @@ def declare_in_type(self, self_type): # 'file', # only in Py2.x ) - + builtin_structs_table = [ ('Py_buffer', 'Py_buffer', [("buf", PyrexTypes.c_void_ptr_type), diff --git a/Cython/Compiler/CmdLine.py b/Cython/Compiler/CmdLine.py index 05ec9125237..061610f0bce 100644 --- a/Cython/Compiler/CmdLine.py +++ b/Cython/Compiler/CmdLine.py @@ -24,9 +24,9 @@ -v, --verbose Be verbose, print file names on multiple compilation -p, --embed-positions If specified, the positions in Cython files of each function definition is embedded in its docstring. - --cleanup Release interned objects on python exit, for memory debugging. - Level indicates aggressiveness, default 0 releases nothing. - -w, --working Sets the working directory for Cython (the directory modules + --cleanup Release interned objects on python exit, for memory debugging. + Level indicates aggressiveness, default 0 releases nothing. + -w, --working Sets the working directory for Cython (the directory modules are searched from) --gdb Output debug information for cygdb @@ -65,7 +65,7 @@ def pop_arg(): return args.pop(0) else: bad_usage() - + def get_param(option): tail = option[2:] if tail: diff --git a/Cython/Compiler/Code.py b/Cython/Compiler/Code.py index c3ed37186a2..5134dcf27aa 100644 --- a/Cython/Compiler/Code.py +++ b/Cython/Compiler/Code.py @@ -29,7 +29,7 @@ class UtilityCode(object): # See GlobalState.put_utility_code. # # hashes/equals by instance - + def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None, proto_block='utility_code_proto'): # proto_block: Which code block to dump prototype in. 
See GlobalState. @@ -84,8 +84,8 @@ def put_code(self, output): writer.put(self.cleanup) else: self.cleanup(writer, output.module_pos) - - + + class FunctionState(object): # return_label string function return point label @@ -101,7 +101,7 @@ class FunctionState(object): def __init__(self, owner, names_taken=cython.set()): self.names_taken = names_taken self.owner = owner - + self.error_label = None self.label_counter = 0 self.labels_used = cython.set() @@ -127,28 +127,28 @@ def new_label(self, name=None): if name is not None: label += '_' + name return label - + def new_error_label(self): old_err_lbl = self.error_label self.error_label = self.new_label('error') return old_err_lbl - + def get_loop_labels(self): return ( self.continue_label, self.break_label) - + def set_loop_labels(self, labels): (self.continue_label, self.break_label) = labels - + def new_loop_labels(self): old_labels = self.get_loop_labels() self.set_loop_labels( - (self.new_label("continue"), + (self.new_label("continue"), self.new_label("break"))) return old_labels - + def get_all_labels(self): return ( self.continue_label, @@ -172,10 +172,10 @@ def all_new_labels(self): new_labels.append(old_label) self.set_all_labels(new_labels) return old_labels - + def use_label(self, lbl): self.labels_used.add(lbl) - + def label_used(self, lbl): return lbl in self.labels_used @@ -402,7 +402,7 @@ class GlobalState(object): # parts {string:CCodeWriter} - + # interned_strings # consts # interned_nums @@ -438,7 +438,7 @@ class GlobalState(object): 'utility_code_def', 'end' ] - + def __init__(self, writer, emit_linenums=False): self.filename_table = {} @@ -556,7 +556,7 @@ def close_global_decls(self): w = self.parts['cleanup_module'] w.putln("}") w.exit_cfunc_scope() - + def put_pyobject_decl(self, entry): self['global_var'].putln("static PyObject *%s;" % entry.cname) @@ -764,7 +764,7 @@ def generate_int_constants(self): # The functions below are there in a transition phase only # and will be deprecated. They are called from Nodes.BlockNode. # The copy&paste duplication is intentional in order to be able - # to see quickly how BlockNode worked, until this is replaced. + # to see quickly how BlockNode worked, until this is replaced. def should_declare(self, cname, entry): if cname in self.declared_cnames: @@ -813,7 +813,7 @@ def commented_file_contents(self, source_desc): # # Utility code state # - + def use_utility_code(self, utility_code): """ Adds code to the C file. utility_code should @@ -859,10 +859,10 @@ class CCodeWriter(object): - filename_table, filename_list, input_file_contents: All codewriters coming from the same root share the same instances simultaneously. """ - + # f file output file # buffer StringIOTree - + # level int indentation level # bol bool beginning of line? 
# marker string comment to emit before next line @@ -876,7 +876,7 @@ class CCodeWriter(object): # about the current class one is in globalstate = None - + def __init__(self, create_from=None, buffer=None, copy_formatting=False, emit_linenums=None): if buffer is None: buffer = StringIOTree() self.buffer = buffer @@ -884,7 +884,7 @@ def __init__(self, create_from=None, buffer=None, copy_formatting=False, emit_li self.last_marker_line = 0 self.source_desc = "" self.pyclass_stack = [] - + self.funcstate = None self.level = 0 self.call_level = 0 @@ -916,13 +916,13 @@ def getvalue(self): return self.buffer.getvalue() def write(self, s): - # also put invalid markers (lineno 0), to indicate that those lines + # also put invalid markers (lineno 0), to indicate that those lines # have no Cython source code correspondence if self.marker is None: cython_lineno = self.last_marker_line else: cython_lineno = self.marker[0] - + self.buffer.markers.extend([cython_lineno] * s.count('\n')) self.buffer.write(s) @@ -971,7 +971,7 @@ def label_used(self, lbl): return self.funcstate.label_used(lbl) def enter_cfunc_scope(self): self.funcstate = FunctionState(self) - + def exit_cfunc_scope(self): self.funcstate = None @@ -1008,7 +1008,7 @@ def putln(self, code = "", safe=False): self.emit_marker() if self.emit_linenums and self.last_marker_line != 0: self.write('\n#line %s "%s"\n' % (self.last_marker_line, self.source_desc)) - + if code: if safe: self.put_safe(code) @@ -1016,7 +1016,7 @@ def putln(self, code = "", safe=False): self.put(code) self.write("\n"); self.bol = 1 - + def emit_marker(self): self.write("\n"); self.indent() @@ -1054,18 +1054,18 @@ def put(self, code): def increase_indent(self): self.level = self.level + 1 - + def decrease_indent(self): self.level = self.level - 1 - + def begin_block(self): self.putln("{") self.increase_indent() - + def end_block(self): self.decrease_indent() self.putln("}") - + def indent(self): self.write(" " * self.level) @@ -1089,21 +1089,21 @@ def mark_pos(self, pos): self.marker = (line, marker) if self.emit_linenums: self.source_desc = source_desc.get_escaped_description() - + def put_label(self, lbl): if lbl in self.funcstate.labels_used: self.putln("%s:;" % lbl) - + def put_goto(self, lbl): self.funcstate.use_label(lbl) self.putln("goto %s;" % lbl) - + def put_var_declarations(self, entries, static = 0, dll_linkage = None, definition = True): for entry in entries: if not entry.in_cinclude: self.put_var_declaration(entry, static, dll_linkage, definition) - + def put_var_declaration(self, entry, static = 0, dll_linkage = None, definition = True): #print "Code.put_var_declaration:", entry.name, "definition =", definition ### @@ -1146,7 +1146,7 @@ def put_temp_declarations(self, func_context): def put_h_guard(self, guard): self.putln("#ifndef %s" % guard) self.putln("#define %s" % guard) - + def unlikely(self, cond): if Options.gcc_branch_hints: return 'unlikely(%s)' % cond @@ -1162,17 +1162,17 @@ def entry_as_pyobject(self, entry): return "(PyObject *)" + entry.cname else: return entry.cname - + def as_pyobject(self, cname, type): from PyrexTypes import py_object_type, typecast return typecast(py_object_type, type, cname) - + def put_gotref(self, cname): self.putln("__Pyx_GOTREF(%s);" % cname) - + def put_giveref(self, cname): self.putln("__Pyx_GIVEREF(%s);" % cname) - + def put_xgiveref(self, cname): self.putln("__Pyx_XGIVEREF(%s);" % cname) @@ -1184,7 +1184,7 @@ def put_incref(self, cname, type, nanny=True): self.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname, 
type)) else: self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type)) - + def put_decref(self, cname, type, nanny=True): if nanny: self.putln("__Pyx_DECREF(%s);" % self.as_pyobject(cname, type)) @@ -1194,7 +1194,7 @@ def put_decref(self, cname, type, nanny=True): def put_var_gotref(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_GOTREF(%s);" % self.entry_as_pyobject(entry)) - + def put_var_giveref(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_GIVEREF(%s);" % self.entry_as_pyobject(entry)) @@ -1210,7 +1210,7 @@ def put_var_xgiveref(self, entry): def put_var_incref(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry)) - + def put_decref_clear(self, cname, type, nanny=True): from PyrexTypes import py_object_type, typecast if nanny: @@ -1219,13 +1219,13 @@ def put_decref_clear(self, cname, type, nanny=True): else: self.putln("Py_DECREF(%s); %s = 0;" % ( typecast(py_object_type, type, cname), cname)) - + def put_xdecref(self, cname, type, nanny=True): if nanny: self.putln("__Pyx_XDECREF(%s);" % self.as_pyobject(cname, type)) else: self.putln("Py_XDECREF(%s);" % self.as_pyobject(cname, type)) - + def put_xdecref_clear(self, cname, type, nanny=True): if nanny: self.putln("__Pyx_XDECREF(%s); %s = 0;" % ( @@ -1240,21 +1240,21 @@ def put_var_decref(self, entry): self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry)) else: self.putln("__Pyx_DECREF(%s);" % self.entry_as_pyobject(entry)) - + def put_var_decref_clear(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_DECREF(%s); %s = 0;" % ( self.entry_as_pyobject(entry), entry.cname)) - + def put_var_xdecref(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry)) - + def put_var_xdecref_clear(self, entry): if entry.type.is_pyobject: self.putln("__Pyx_XDECREF(%s); %s = 0;" % ( self.entry_as_pyobject(entry), entry.cname)) - + def put_var_decrefs(self, entries, used_only = 0): for entry in entries: if not used_only or entry.used: @@ -1262,15 +1262,15 @@ def put_var_decrefs(self, entries, used_only = 0): self.put_var_xdecref(entry) else: self.put_var_decref(entry) - + def put_var_xdecrefs(self, entries): for entry in entries: self.put_var_xdecref(entry) - + def put_var_xdecrefs_clear(self, entries): for entry in entries: self.put_var_xdecref_clear(entry) - + def put_init_to_py_none(self, cname, type, nanny=True): from PyrexTypes import py_object_type, typecast py_none = typecast(type, py_object_type, "Py_None") @@ -1278,7 +1278,7 @@ def put_init_to_py_none(self, cname, type, nanny=True): self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none)) else: self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none)) - + def put_init_var_to_py_none(self, entry, template = "%s", nanny=True): code = template % entry.cname #if entry.type.is_extension_type: @@ -1306,7 +1306,7 @@ def put_pymethoddef(self, entry, term, allow_skip=True): method_flags += [method_coexist] self.putln( '{__Pyx_NAMESTR("%s"), (PyCFunction)%s, %s, __Pyx_DOCSTR(%s)}%s' % ( - entry.name, + entry.name, entry.func_cname, "|".join(method_flags), doc_code, @@ -1330,7 +1330,7 @@ def set_error_info(self, pos): Naming.lineno_cname, pos[1], cinfo) - + def error_goto(self, pos): lbl = self.funcstate.error_label self.funcstate.use_label(lbl) @@ -1340,16 +1340,16 @@ def error_goto(self, pos): def error_goto_if(self, cond, pos): return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos)) - + def error_goto_if_null(self, cname, pos): return 
self.error_goto_if("!%s" % cname, pos) - + def error_goto_if_neg(self, cname, pos): return self.error_goto_if("%s < 0" % cname, pos) - + def error_goto_if_PyErr(self, pos): return self.error_goto_if("PyErr_Occurred()", pos) - + def lookup_filename(self, filename): return self.globalstate.lookup_filename(filename) @@ -1361,13 +1361,13 @@ def put_finish_refcount_context(self): def put_trace_declarations(self): self.putln('__Pyx_TraceDeclarations'); - + def put_trace_call(self, name, pos): self.putln('__Pyx_TraceCall("%s", %s[%s], %s);' % (name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1])); - + def put_trace_exception(self): self.putln("__Pyx_TraceException();") - + def put_trace_return(self, retvalue_cname): self.putln("__Pyx_TraceReturn(%s);" % retvalue_cname) @@ -1379,13 +1379,13 @@ class PyrexCodeWriter(object): def __init__(self, outfile_name): self.f = Utils.open_new_file(outfile_name) self.level = 0 - + def putln(self, code): self.f.write("%s%s\n" % (" " * self.level, code)) - + def indent(self): self.level += 1 - + def dedent(self): self.level -= 1 diff --git a/Cython/Compiler/ControlFlow.py b/Cython/Compiler/ControlFlow.py index f63d52caa09..cadcdf5ad06 100644 --- a/Cython/Compiler/ControlFlow.py +++ b/Cython/Compiler/ControlFlow.py @@ -1,17 +1,17 @@ import bisect, sys -# This module keeps track of arbitrary "states" at any point of the code. +# This module keeps track of arbitrary "states" at any point of the code. # A state is considered known if every path to the given point agrees on -# its state, otherwise it is None (i.e. unknown). +# its state, otherwise it is None (i.e. unknown). -# It might be useful to be able to "freeze" the set of states by pushing +# It might be useful to be able to "freeze" the set of states by pushing # all state changes to the tips of the trees for fast reading. Perhaps this -# could be done on get_state, clearing the cache on set_state (assuming -# incoming is immutable). +# could be done on get_state, clearing the cache on set_state (assuming +# incoming is immutable). -# This module still needs a lot of work, and probably should totally be -# redesigned. It doesn't take return, raise, continue, or break into -# account. +# This module still needs a lot of work, and probably should totally be +# redesigned. It doesn't take return, raise, continue, or break into +# account. 
from Cython.Compiler.Scanning import StringSourceDescriptor try: @@ -31,26 +31,26 @@ def __init__(self, start_pos, incoming, parent): self.parent = parent self.tip = {} self.end_pos = _END_POS - + def start_branch(self, pos): self.end_pos = pos branch_point = BranchingControlFlow(pos, self) if self.parent is not None: self.parent.branches[-1] = branch_point return branch_point.branches[0] - + def next_branch(self, pos): self.end_pos = pos return self.parent.new_branch(pos) - + def finish_branch(self, pos): self.end_pos = pos self.parent.end_pos = pos return LinearControlFlow(pos, self.parent) - + def get_state(self, item, pos=_END_POS): return self.get_pos_state(item, pos)[1] - + def get_pos_state(self, item, pos=_END_POS): # do some caching if pos > self.end_pos: @@ -86,14 +86,14 @@ def set_state(self, pos, item, state): if item in current.tip: del current.tip[item] current._set_state_local(pos, item, state) - - + + class LinearControlFlow(ControlFlow): def __init__(self, start_pos=(), incoming=None, parent=None): ControlFlow.__init__(self, start_pos, incoming, parent) self.events = {} - + def _set_state_local(self, pos, item, state): if item in self.events: event_list = self.events[item] @@ -111,10 +111,10 @@ def _get_pos_state_local(self, item, pos): return None def to_string(self, indent='', limit=None): - + if len(self.events) == 0: s = indent + "[no state changes]" - + else: all = [] for item, event_list in self.events.items(): @@ -126,21 +126,21 @@ def to_string(self, indent='', limit=None): if self.incoming is not limit and self.incoming is not None: s = "%s\n%s" % (self.incoming.to_string(indent, limit=limit), s) return s - - + + class BranchingControlFlow(ControlFlow): - + def __init__(self, start_pos, incoming, parent=None): ControlFlow.__init__(self, start_pos, incoming, parent) self.branches = [LinearControlFlow(start_pos, incoming, parent=self)] self.branch_starts = [start_pos] - + def _set_state_local(self, pos, item, state): for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]): if pos >= branch_pos: branch._set_state_local(pos, item, state) return - + def _get_pos_state_local(self, item, pos, stop_at=None): if pos < self.end_pos: for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]): diff --git a/Cython/Compiler/CythonScope.py b/Cython/Compiler/CythonScope.py index 54dc38666d5..deffaffb4b6 100644 --- a/Cython/Compiler/CythonScope.py +++ b/Cython/Compiler/CythonScope.py @@ -15,9 +15,9 @@ def __init__(self, context): pos=None, defining = 1, cname='') - + def lookup_type(self, name): - # This function should go away when types are all first-level objects. + # This function should go away when types are all first-level objects. 
type = parse_basic_type(name) if type: return type @@ -32,12 +32,12 @@ def create_utility_scope(context): utility_scope = ModuleScope(u'utility', None, context) # These are used to optimize isinstance in FinalOptimizePhase - type_object = utility_scope.declare_typedef('PyTypeObject', - base_type = c_void_type, + type_object = utility_scope.declare_typedef('PyTypeObject', + base_type = c_void_type, pos = None, cname = 'PyTypeObject') type_object.is_void = True - + utility_scope.declare_cfunction( 'PyObject_TypeCheck', CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None), @@ -45,5 +45,5 @@ def create_utility_scope(context): pos = None, defining = 1, cname = 'PyObject_TypeCheck') - + return utility_scope diff --git a/Cython/Compiler/Errors.py b/Cython/Compiler/Errors.py index 0e9954b8671..401125bbe57 100644 --- a/Cython/Compiler/Errors.py +++ b/Cython/Compiler/Errors.py @@ -44,7 +44,7 @@ def format_error(message, position): return message class CompileError(PyrexError): - + def __init__(self, position = None, message = u""): self.position = position self.message_only = message @@ -54,7 +54,7 @@ def __init__(self, position = None, message = u""): Exception.__init__(self, format_error(message, position)) class CompileWarning(PyrexWarning): - + def __init__(self, position = None, message = ""): self.position = position # Deprecated and withdrawn in 2.6: @@ -63,7 +63,7 @@ def __init__(self, position = None, message = ""): class InternalError(Exception): # If this is ever raised, there is a bug in the compiler. - + def __init__(self, message): self.message_only = message Exception.__init__(self, u"Internal compiler error: %s" @@ -71,7 +71,7 @@ def __init__(self, message): class AbortError(Exception): # Throw this to stop the compilation immediately. - + def __init__(self, message): self.message_only = message Exception.__init__(self, u"Abort error: %s" % message) @@ -98,7 +98,7 @@ def __init__(self, pos, context, message, cause, stacktrace=None): CompileError.__init__(self, pos, message) class NoElementTreeInstalledException(PyrexError): - """raised when the user enabled options.gdb_debug but no ElementTree + """raised when the user enabled options.gdb_debug but no ElementTree implementation was found """ @@ -155,7 +155,7 @@ def error(position, message): #print "Errors.error:", repr(position), repr(message) ### if position is None: raise InternalError(message) - err = CompileError(position, message) + err = CompileError(position, message) if debug_exception_on_error: raise Exception(err) # debug report_error(err) return err @@ -198,7 +198,7 @@ def warn_once(position, message, level=0): return warn -# These functions can be used to momentarily suppress errors. +# These functions can be used to momentarily suppress errors. error_stack = [] diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py index 0e89b311ff0..6eb7aa6a92d 100755 --- a/Cython/Compiler/ExprNodes.py +++ b/Cython/Compiler/ExprNodes.py @@ -70,14 +70,14 @@ class ExprNode(Node): # result_code string Code fragment # result_ctype string C type of result_code if different from type # is_temp boolean Result is in a temporary variable - # is_sequence_constructor + # is_sequence_constructor # boolean Is a list or tuple constructor expression # is_starred boolean Is a starred expression (e.g. '*a') # saved_subexpr_nodes # [ExprNode or [ExprNode or None] or None] # Cached result of subexpr_nodes() # use_managed_ref boolean use ref-counted temps/assignments/etc. 
- + result_ctype = None type = None temp_code = None @@ -110,8 +110,8 @@ class ExprNode(Node): # 'subexprs' class attribute of ExprNodes, which should # contain a list of the names of attributes which can # hold sub-nodes or sequences of sub-nodes. - # - # The framework makes use of a number of abstract methods. + # + # The framework makes use of a number of abstract methods. # Their responsibilities are as follows. # # Declaration Analysis phase @@ -134,7 +134,7 @@ class ExprNode(Node): # # analyse_target_types # Called during the Analyse Types phase to analyse - # the LHS of an assignment or argument of a del + # the LHS of an assignment or argument of a del # statement. Similar responsibilities to analyse_types. # # target_code @@ -145,9 +145,9 @@ class ExprNode(Node): # check_const # - Check that this node and its subnodes form a # legal constant expression. If so, do nothing, - # otherwise call not_const. + # otherwise call not_const. # - # The default implementation of check_const + # The default implementation of check_const # assumes that the expression is not constant. # # check_const_addr @@ -156,7 +156,7 @@ class ExprNode(Node): # constant. Otherwise, call addr_not_const. # # The default implementation of calc_const_addr - # assumes that the expression is not a constant + # assumes that the expression is not a constant # lvalue. # # Code Generation phase @@ -177,8 +177,8 @@ class ExprNode(Node): # sub-expressions. # # calculate_result_code - # - Should return a C code fragment evaluating to the - # result. This is only called when the result is not + # - Should return a C code fragment evaluating to the + # result. This is only called when the result is not # a temporary. # # generate_assignment_code @@ -196,10 +196,10 @@ class ExprNode(Node): # - Call generate_disposal_code on all sub-expressions. # # - + is_sequence_constructor = 0 is_attribute = 0 - + saved_subexpr_nodes = None is_temp = 0 is_target = 0 @@ -215,16 +215,16 @@ def __get_child_attrs(self): return self.subexprs _get_child_attrs = __get_child_attrs child_attrs = property(fget=_get_child_attrs) - + def not_implemented(self, method_name): print_call_chain(method_name, "not implemented") ### raise InternalError( "%s.%s not implemented" % (self.__class__.__name__, method_name)) - + def is_lvalue(self): return 0 - + def is_ephemeral(self): # An ephemeral node is one whose result is in # a Python temporary and we suspect there are no @@ -245,21 +245,21 @@ def subexpr_nodes(self): else: nodes.append(item) return nodes - + def result(self): if self.is_temp: return self.temp_code else: return self.calculate_result_code() - + def result_as(self, type = None): # Return the result code cast to the specified C type. return typecast(type, self.ctype(), self.result()) - + def py_result(self): # Return the result code cast to PyObject *. return self.result_as(py_object_type) - + def ctype(self): # Return the native C type of the result (i.e. the # C type of the result_code expression). @@ -295,18 +295,18 @@ def has_constant_result(self): def compile_time_value(self, denv): # Return value of compile-time expression, or report error. 
error(self.pos, "Invalid compile-time expression") - + def compile_time_value_error(self, e): error(self.pos, "Error in compile-time expression: %s: %s" % ( e.__class__.__name__, e)) - + # ------------- Declaration Analysis ---------------- - + def analyse_target_declaration(self, env): error(self.pos, "Cannot assign to or delete this") - + # ------------- Expression Analysis ---------------- - + def analyse_const_expression(self, env): # Called during the analyse_declarations phase of a # constant expression. Analyses the expression's type, @@ -314,25 +314,25 @@ def analyse_const_expression(self, env): # and determines its value. self.analyse_types(env) return self.check_const() - + def analyse_expressions(self, env): # Convenience routine performing both the Type - # Analysis and Temp Allocation phases for a whole + # Analysis and Temp Allocation phases for a whole # expression. self.analyse_types(env) - + def analyse_target_expression(self, env, rhs): # Convenience routine performing both the Type # Analysis and Temp Allocation phases for the LHS of # an assignment. self.analyse_target_types(env) - + def analyse_boolean_expression(self, env): # Analyse expression and coerce to a boolean. self.analyse_types(env) bool = self.coerce_to_boolean(env) return bool - + def analyse_temp_boolean_expression(self, env): # Analyse boolean expression and coerce result into # a temporary. This is used when a branch is to be @@ -345,17 +345,17 @@ def analyse_temp_boolean_expression(self, env): return self.coerce_to_boolean(env).coerce_to_simple(env) # --------------- Type Inference ----------------- - + def type_dependencies(self, env): # Returns the list of entries whose types must be determined # before the type of self can be infered. if hasattr(self, 'type') and self.type is not None: return () return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ()) - + def infer_type(self, env): - # Attempt to deduce the type of self. - # Differs from analyse_types as it avoids unnecessary + # Attempt to deduce the type of self. + # Differs from analyse_types as it avoids unnecessary # analysis of subexpressions, but can assume everything # in self.type_dependencies() has been resolved. if hasattr(self, 'type') and self.type is not None: @@ -364,27 +364,27 @@ def infer_type(self, env): return self.entry.type else: self.not_implemented("infer_type") - + # --------------- Type Analysis ------------------ - + def analyse_as_module(self, env): # If this node can be interpreted as a reference to a # cimported module, return its scope, else None. return None - + def analyse_as_type(self, env): # If this node can be interpreted as a reference to a # type, return that type, else None. return None - + def analyse_as_extension_type(self, env): # If this node can be interpreted as a reference to an # extension type, return its type, else None. return None - + def analyse_types(self, env): self.not_implemented("analyse_types") - + def analyse_target_types(self, env): self.analyse_types(env) @@ -402,33 +402,33 @@ def gil_assignment_check(self, env): def check_const(self): self.not_const() return False - + def not_const(self): error(self.pos, "Not allowed in a constant expression") - + def check_const_addr(self): self.addr_not_const() return False - + def addr_not_const(self): error(self.pos, "Address is not constant") # ----------------- Result Allocation ----------------- - + def result_in_temp(self): # Return true if result is in a temporary owned by # this node or one of its subexpressions. 
Overridden # by certain nodes which can share the result of # a subnode. return self.is_temp - + def target_code(self): # Return code fragment for use as LHS of a C assignment. return self.calculate_result_code() - + def calculate_result_code(self): self.not_implemented("calculate_result_code") - + # def release_target_temp(self, env): # # Release temporaries used by LHS of an assignment. # self.release_subexpr_temps(env) @@ -458,16 +458,16 @@ def release_temp_result(self, code): self.temp_code = None # ---------------- Code Generation ----------------- - + def make_owned_reference(self, code): # If result is a pyobject, make sure we own # a reference to it. if self.type.is_pyobject and not self.result_in_temp(): code.put_incref(self.result(), self.ctype()) - + def generate_evaluation_code(self, code): code.mark_pos(self.pos) - + # Generate code to evaluate this node and # its sub-expressions, and dispose of any # temporary results of its sub-expressions. @@ -486,10 +486,10 @@ def generate_evaluation_code(self, code): def generate_subexpr_evaluation_code(self, code): for node in self.subexpr_nodes(): node.generate_evaluation_code(code) - + def generate_result_code(self, code): self.not_implemented("generate_result_code") - + def generate_disposal_code(self, code): if self.is_temp: if self.type.is_pyobject: @@ -503,7 +503,7 @@ def generate_subexpr_disposal_code(self, code): # of all sub-expressions. for node in self.subexpr_nodes(): node.generate_disposal_code(code) - + def generate_post_assignment_code(self, code): if self.is_temp: if self.type.is_pyobject: @@ -513,10 +513,10 @@ def generate_post_assignment_code(self, code): def generate_assignment_code(self, rhs, code): # Stub method for nodes which are not legal as - # the LHS of an assignment. An error will have + # the LHS of an assignment. An error will have # been reported earlier. pass - + def generate_deletion_code(self, code): # Stub method for nodes that are not legal as # the argument of a del statement. An error @@ -529,7 +529,7 @@ def free_temps(self, code): self.release_temp_result(code) else: self.free_subexpr_temps(code) - + def free_subexpr_temps(self, code): for sub in self.subexpr_nodes(): sub.free_temps(code) @@ -538,13 +538,13 @@ def generate_function_definitions(self, env, code): pass # ---------------- Annotation --------------------- - + def annotate(self, code): for node in self.subexpr_nodes(): node.annotate(code) - + # ----------------- Coercion ---------------------- - + def coerce_to(self, dst_type, env): # Coerce the result so that it can be assigned to # something of type dst_type. If processing is necessary, @@ -571,7 +571,7 @@ def coerce_to(self, dst_type, env): if dst_type.is_reference: dst_type = dst_type.ref_base_type - + if dst_type.is_pyobject: if not src.type.is_pyobject: if dst_type is bytes_type and src.type.is_int: @@ -583,7 +583,7 @@ def coerce_to(self, dst_type, env): src = PyTypeTestNode(src, dst_type, env) elif src.type.is_pyobject: src = CoerceFromPyTypeNode(dst_type, src, env) - elif (dst_type.is_complex + elif (dst_type.is_complex and src_type != dst_type and dst_type.assignable_from(src_type)): src = CoerceToComplexNode(src, dst_type, env) @@ -628,31 +628,31 @@ def coerce_to_boolean(self, env): return CoerceToBooleanNode(self, env) else: if not (type.is_int or type.is_enum or type.is_error): - error(self.pos, + error(self.pos, "Type '%s' not acceptable as a boolean" % type) return self - + def coerce_to_integer(self, env): # If not already some C integer type, coerce to longint. 
if self.type.is_int: return self else: return self.coerce_to(PyrexTypes.c_long_type, env) - + def coerce_to_temp(self, env): # Ensure that the result is in a temporary. if self.result_in_temp(): return self else: return CoerceToTempNode(self, env) - + def coerce_to_simple(self, env): # Ensure that the result is simple (see is_simple). if self.is_simple(): return self else: return self.coerce_to_temp(env) - + def is_simple(self): # A node is simple if its result is something that can # be referred to without performing any operations, e.g. @@ -682,7 +682,7 @@ def as_none_safe_node(self, message, error="PyExc_TypeError"): class AtomicExprNode(ExprNode): # Abstract base class for expression nodes which have # no sub-expressions. - + subexprs = [] # Override to optimize -- we know we have no children @@ -693,10 +693,10 @@ def generate_subexpr_disposal_code(self, code): class PyConstNode(AtomicExprNode): # Abstract base class for constant Python values. - + is_literal = 1 type = py_object_type - + def is_simple(self): return 1 @@ -705,7 +705,7 @@ def may_be_none(self): def analyse_types(self, env): pass - + def calculate_result_code(self): return self.value @@ -715,11 +715,11 @@ def generate_result_code(self, code): class NoneNode(PyConstNode): # The constant value None - + value = "Py_None" constant_result = None - + nogil_check = None def compile_time_value(self, denv): @@ -731,7 +731,7 @@ def may_be_none(self): class EllipsisNode(PyConstNode): # '...' in a subscript list. - + value = "Py_Ellipsis" constant_result = Ellipsis @@ -744,7 +744,7 @@ class ConstNode(AtomicExprNode): # Abstract base type for literal constant nodes. # # value string C code fragment - + is_literal = 1 nogil_check = None @@ -756,10 +756,10 @@ def may_be_none(self): def analyse_types(self, env): pass # Types are held in class variables - + def check_const(self): return True - + def get_constant_c_result_code(self): return self.calculate_result_code() @@ -779,7 +779,7 @@ def calculate_constant_result(self): def compile_time_value(self, denv): return self.value - + def calculate_result_code(self): return str(int(self.value)) @@ -798,10 +798,10 @@ class CharNode(ConstNode): def calculate_constant_result(self): self.constant_result = ord(self.value) - + def compile_time_value(self, denv): return ord(self.value) - + def calculate_result_code(self): return "'%s'" % StringEncoding.escape_char(self.value) @@ -891,7 +891,7 @@ def generate_evaluation_code(self, code): self.result_code = code.get_py_num(plain_integer_string, self.longness) else: self.result_code = self.get_constant_c_result_code() - + def get_constant_c_result_code(self): return self.value_as_c_integer_string() + self.unsigned + self.longness @@ -928,7 +928,7 @@ def calculate_constant_result(self): def compile_time_value(self, denv): return float(self.value) - + def calculate_result_code(self): strval = self.value assert isinstance(strval, (str, unicode)) @@ -955,7 +955,7 @@ def compile_time_value(self, denv): def analyse_as_type(self, env): type = PyrexTypes.parse_basic_type(self.value) - if type is not None: + if type is not None: return type from TreeFragment import TreeFragment pos = (self.pos[0], self.pos[1], self.pos[2]-7) @@ -1019,7 +1019,7 @@ def generate_evaluation_code(self, code): def get_constant_c_result_code(self): return None # FIXME - + def calculate_result_code(self): return self.result_code @@ -1077,7 +1077,7 @@ def generate_evaluation_code(self, code): def calculate_result_code(self): return self.result_code - + def compile_time_value(self, 
env): return self.value @@ -1126,7 +1126,7 @@ def get_constant_c_result_code(self): def calculate_result_code(self): return self.result_code - + def compile_time_value(self, env): return self.value @@ -1146,10 +1146,10 @@ class LongNode(AtomicExprNode): def calculate_constant_result(self): self.constant_result = Utils.str_to_number(self.value) - + def compile_time_value(self, denv): return Utils.str_to_number(self.value) - + def analyse_types(self, env): self.is_temp = 1 @@ -1171,15 +1171,15 @@ class ImagNode(AtomicExprNode): # Imaginary number literal # # value float imaginary part - + type = PyrexTypes.c_double_complex_type def calculate_constant_result(self): self.constant_result = complex(0.0, self.value) - + def compile_time_value(self, denv): return complex(0.0, self.value) - + def analyse_types(self, env): self.type.create_declaration_utility_code(env) @@ -1214,16 +1214,16 @@ def generate_result_code(self, code): float(self.value), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + class NewExprNode(AtomicExprNode): # C++ new statement # # cppclass node c++ class to create - + type = None - + def infer_type(self, env): type = self.cppclass.analyse_as_type(env) if type is None or not type.is_cpp_class: @@ -1241,7 +1241,7 @@ def infer_type(self, env): self.entry = constructor self.type = constructor.type return self.type - + def analyse_types(self, env): if self.type is None: self.infer_type(env) @@ -1251,7 +1251,7 @@ def may_be_none(self): def generate_result_code(self, code): pass - + def calculate_result_code(self): return "new " + self.class_type.declaration_code("") @@ -1262,7 +1262,7 @@ class NameNode(AtomicExprNode): # name string Python name of the variable # entry Entry Symbol table entry # type_entry Entry For extension type names, the original type entry - + is_name = True is_cython_module = False cython_attribute = None @@ -1275,12 +1275,12 @@ def create_analysed_rvalue(pos, env, entry): node = NameNode(pos) node.analyse_types(env, entry=entry) return node - + def as_cython_attribute(self): return self.cython_attribute - + create_analysed_rvalue = staticmethod(create_analysed_rvalue) - + def type_dependencies(self, env): if self.entry is None: self.entry = env.lookup(self.name) @@ -1288,7 +1288,7 @@ def type_dependencies(self, env): return (self.entry,) else: return () - + def infer_type(self, env): if self.entry is None: self.entry = env.lookup(self.name) @@ -1301,7 +1301,7 @@ def infer_type(self, env): return type_type else: return self.entry.type - + def compile_time_value(self, denv): try: return denv.lookup(self.name) @@ -1312,7 +1312,7 @@ def get_constant_c_result_code(self): if not self.entry or self.entry.type.is_pyobject: return None return self.entry.cname - + def coerce_to(self, dst_type, env): # If coercing to a generic pyobject and this is a builtin # C function with a Python equivalent, manufacture a NameNode @@ -1330,7 +1330,7 @@ def coerce_to(self, dst_type, env): node.analyse_rvalue_entry(env) return node return super(NameNode, self).coerce_to(dst_type, env) - + def analyse_as_module(self, env): # Try to interpret this as a reference to a cimported module. # Returns the module scope, or None. 
@@ -1340,7 +1340,7 @@ def analyse_as_module(self, env): if entry and entry.as_module: return entry.as_module return None - + def analyse_as_type(self, env): if self.cython_attribute: type = PyrexTypes.parse_basic_type(self.cython_attribute) @@ -1355,7 +1355,7 @@ def analyse_as_type(self, env): return entry.type else: return None - + def analyse_as_extension_type(self, env): # Try to interpret this as a reference to an extension type. # Returns the extension type, or None. @@ -1366,7 +1366,7 @@ def analyse_as_extension_type(self, env): return entry.type else: return None - + def analyse_target_declaration(self, env): if not self.entry: self.entry = env.lookup_here(self.name) @@ -1382,7 +1382,7 @@ def analyse_target_declaration(self, env): env.control_flow.set_state(self.pos, (self.name, 'source'), 'assignment') if self.entry.is_declared_generic: self.result_ctype = py_object_type - + def analyse_types(self, env): if self.entry is None: self.entry = env.lookup(self.name) @@ -1400,7 +1400,7 @@ def analyse_types(self, env): if entry.utility_code: env.use_utility_code(entry.utility_code) self.analyse_rvalue_entry(env) - + def analyse_target_types(self, env): self.analyse_entry(env) if not self.is_lvalue(): @@ -1411,7 +1411,7 @@ def analyse_target_types(self, env): if self.entry.type.is_buffer: import Buffer Buffer.used_buffer_aux_vars(self.entry) - + def analyse_rvalue_entry(self, env): #print "NameNode.analyse_rvalue_entry:", self.name ### #print "Entry:", self.entry.__dict__ ### @@ -1452,29 +1452,29 @@ def check_identifier_kind(self): entry = self.entry if entry.is_type and entry.type.is_extension_type: self.type_entry = entry - if not (entry.is_const or entry.is_variable + if not (entry.is_const or entry.is_variable or entry.is_builtin or entry.is_cfunction or entry.is_cpp_class): if self.entry.as_variable: self.entry = self.entry.as_variable else: - error(self.pos, + error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name) def is_simple(self): # If it's not a C variable, it'll be in a temp. return 1 - + def calculate_target_results(self, env): pass - + def check_const(self): entry = self.entry if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin): self.not_const() return False return True - + def check_const_addr(self): entry = self.entry if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin): @@ -1486,18 +1486,18 @@ def is_lvalue(self): return self.entry.is_variable and \ not self.entry.type.is_array and \ not self.entry.is_readonly - + def is_ephemeral(self): # Name nodes are never ephemeral, even if the # result is in a temporary. 
return 0 - + def calculate_result_code(self): entry = self.entry if not entry: return "" # There was an error earlier return entry.cname - + def generate_result_code(self, code): assert hasattr(self, 'entry') entry = self.entry @@ -1519,7 +1519,7 @@ def generate_result_code(self, code): interned_cname, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + elif entry.is_pyglobal or entry.is_builtin: assert entry.type.is_pyobject, "Python global or builtin not a Python object" interned_cname = code.intern_identifier(self.entry.name) @@ -1531,11 +1531,11 @@ def generate_result_code(self, code): code.putln( '%s = __Pyx_GetName(%s, %s); %s' % ( self.result(), - namespace, + namespace, interned_cname, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + elif entry.is_local and False: # control flow not good enough yet assigned = entry.scope.control_flow.get_state((entry.name, 'initialized'), self.pos) @@ -1555,7 +1555,7 @@ def generate_assignment_code(self, rhs, code): if (self.entry.type.is_ptr and isinstance(rhs, ListNode) and not self.lhs_of_first_assignment): error(self.pos, "Literal list must be assigned to pointer at time of declaration") - + # is_pyglobal seems to be True for module level-globals only. # We use this to access class->tp_dict if necessary. if entry.is_pyglobal: @@ -1651,11 +1651,11 @@ def generate_acquire_buffer(self, rhs, code): Buffer.put_assign_to_buffer(self.result(), rhstmp, buffer_aux, self.entry.type, is_initialized=not self.lhs_of_first_assignment, pos=self.pos, code=code) - + if not pretty_rhs: code.putln("%s = 0;" % rhstmp) code.funcstate.release_temp(rhstmp) - + def generate_deletion_code(self, code): if self.entry is None: return # There was an error earlier @@ -1669,11 +1669,11 @@ def generate_deletion_code(self, code): namespace, self.entry.name)) else: - code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, '__Pyx_DelAttrString(%s, "%s")' % ( Naming.module_cname, self.entry.name)) - + def annotate(self, code): if hasattr(self, 'is_called') and self.is_called: pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1) @@ -1681,16 +1681,16 @@ def annotate(self, code): code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name))) else: code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name))) - + class BackquoteNode(ExprNode): # `expr` # # arg ExprNode - + type = py_object_type - + subexprs = ['arg'] - + def analyse_types(self, env): self.arg.analyse_types(env) self.arg = self.arg.coerce_to_pyobject(env) @@ -1708,21 +1708,21 @@ def generate_result_code(self, code): self.arg.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + class ImportNode(ExprNode): # Used as part of import statement implementation. 
- # Implements result = + # Implements result = # __import__(module_name, globals(), None, name_list) # # module_name StringNode dotted name of module # name_list ListNode or None list of names to be imported - + type = py_object_type - + subexprs = ['module_name', 'name_list'] - + def analyse_types(self, env): self.module_name.analyse_types(env) self.module_name = self.module_name.coerce_to_pyobject(env) @@ -1757,11 +1757,11 @@ class IteratorNode(ExprNode): # Implements result = iter(sequence) # # sequence ExprNode - + type = py_object_type - + subexprs = ['sequence'] - + def analyse_types(self, env): self.sequence.analyse_types(env) if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \ @@ -1822,16 +1822,16 @@ class NextNode(AtomicExprNode): # The iterator is not owned by this node. # # iterator ExprNode - + type = py_object_type - + def __init__(self, iterator, env): self.pos = iterator.pos self.iterator = iterator if iterator.type.is_ptr or iterator.type.is_array: self.type = iterator.type.base_type self.is_temp = 1 - + def generate_result_code(self, code): sequence_type = self.iterator.sequence.type if sequence_type is list_type: @@ -1884,15 +1884,15 @@ class ExcValueNode(AtomicExprNode): # Node created during analyse_types phase # of an ExceptClauseNode to fetch the current # exception value. - + type = py_object_type - + def __init__(self, pos, env): ExprNode.__init__(self, pos) def set_var(self, var): self.var = var - + def calculate_result_code(self): return self.var @@ -1913,17 +1913,17 @@ class TempNode(ExprNode): # the regular cycle. subexprs = [] - + def __init__(self, pos, type, env): ExprNode.__init__(self, pos) self.type = type if type.is_pyobject: self.result_ctype = py_object_type self.is_temp = 1 - + def analyse_types(self, env): return self.type - + def generate_result_code(self, code): pass @@ -1944,19 +1944,19 @@ def result(self): # Do not participate in normal temp alloc/dealloc: def allocate_temp_result(self, code): pass - + def release_temp_result(self, code): pass class PyTempNode(TempNode): # TempNode holding a Python value. - + def __init__(self, pos, env): TempNode.__init__(self, pos, PyrexTypes.py_object_type, env) class RawCNameExprNode(ExprNode): subexprs = [] - + def __init__(self, pos, type=None): self.pos = pos self.type = type @@ -1991,7 +1991,7 @@ class IndexNode(ExprNode): # indices is used on buffer access, index on non-buffer access. # The former contains a clean list of index parameters, the # latter whatever Python object is needed for index access. 
- + subexprs = ['base', 'index', 'indices'] indices = None @@ -2010,13 +2010,13 @@ def compile_time_value(self, denv): return base[index] except Exception, e: self.compile_time_value_error(e) - + def is_ephemeral(self): return self.base.is_ephemeral() - + def analyse_target_declaration(self, env): pass - + def analyse_as_type(self, env): base_type = self.base.analyse_as_type(env) if base_type and not base_type.is_pyobject: @@ -2027,23 +2027,23 @@ def analyse_as_type(self, env): template_values = [self.index] import Nodes type_node = Nodes.TemplatedTypeNode( - pos = self.pos, - positional_args = template_values, + pos = self.pos, + positional_args = template_values, keyword_args = None) return type_node.analyse(env, base_type = base_type) else: return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env))) return None - + def type_dependencies(self, env): return self.base.type_dependencies(env) - + def infer_type(self, env): base_type = self.base.infer_type(env) if isinstance(self.index, SliceNode): # slicing! if base_type.is_string: - # sliced C strings must coerce to Python + # sliced C strings must coerce to Python return bytes_type elif base_type in (unicode_type, bytes_type, str_type, list_type, tuple_type): # slicing these returns the same type @@ -2080,10 +2080,10 @@ def infer_type(self, env): else: # TODO: Handle buffers (hopefully without too much redundancy). return py_object_type - + def analyse_types(self, env): self.analyse_base_and_index_types(env, getting = 1) - + def analyse_target_types(self, env): self.analyse_base_and_index_types(env, setting = 1) @@ -2103,7 +2103,7 @@ def analyse_base_and_index_types(self, env, getting = 0, setting = 0): # error messages self.type = PyrexTypes.error_type return - + is_slice = isinstance(self.index, SliceNode) # Potentially overflowing index value. if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value): @@ -2235,7 +2235,7 @@ def nogil_check(self, env): def check_const_addr(self): return self.base.check_const_addr() and self.index.check_const() - + def is_lvalue(self): return 1 @@ -2253,7 +2253,7 @@ def calculate_result_code(self): else: return "(%s[%s])" % ( self.base.result(), self.index.result()) - + def extra_index_params(self): if self.index.type.is_int: if self.original_index_type.signed: @@ -2271,7 +2271,7 @@ def generate_subexpr_evaluation_code(self, code): else: for i in self.indices: i.generate_evaluation_code(code) - + def generate_subexpr_disposal_code(self, code): self.base.generate_disposal_code(code) if not self.indices: @@ -2339,7 +2339,7 @@ def generate_result_code(self, code): self.extra_index_params(), self.result(), code.error_goto(self.pos))) - + def generate_setitem_code(self, value_code, code): if self.index.type.is_int: function = "__Pyx_SetItemInt" @@ -2350,12 +2350,12 @@ def generate_setitem_code(self, value_code, code): if self.base.type is dict_type: function = "PyDict_SetItem" # It would seem that we could specialized lists/tuples, but that - # shouldn't happen here. - # Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input, - # not a PyObject*, and bad conversion here would give the wrong - # exception. Also, tuples are supposed to be immutable, and raise - # TypeErrors when trying to set their entries (PyTuple_SetItem - # is for creating new tuples from). + # shouldn't happen here. + # Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input, + # not a PyObject*, and bad conversion here would give the wrong + # exception. 
Also, tuples are supposed to be immutable, and raise + # TypeErrors when trying to set their entries (PyTuple_SetItem + # is for creating new tuples from). else: function = "PyObject_SetItem" code.putln( @@ -2385,7 +2385,7 @@ def generate_buffer_setitem_code(self, rhs, code, op=""): code.putln("*%s %s= %s;" % (ptr, op, rhs_code)) code.put_giveref("*%s" % ptr) code.funcstate.release_temp(ptr) - else: + else: # Simple case code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result())) @@ -2403,7 +2403,7 @@ def generate_assignment_code(self, rhs, code): self.free_subexpr_temps(code) rhs.generate_disposal_code(code) rhs.free_temps(code) - + def generate_deletion_code(self, code): self.generate_subexpr_evaluation_code(code) #if self.type.is_pyobject: @@ -2455,7 +2455,7 @@ class SliceIndexNode(ExprNode): # base ExprNode # start ExprNode or None # stop ExprNode or None - + subexprs = ['base', 'start', 'stop'] def infer_type(self, env): @@ -2485,10 +2485,10 @@ def compile_time_value(self, denv): return base[start:stop] except Exception, e: self.compile_time_value_error(e) - + def analyse_target_declaration(self, env): pass - + def analyse_target_types(self, env): self.analyse_types(env) # when assigning, we must accept any Python type @@ -2558,11 +2558,11 @@ def generate_result_code(self, code): self.stop_code(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + def generate_assignment_code(self, rhs, code): self.generate_subexpr_evaluation_code(code) if self.type.is_pyobject: - code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, "__Pyx_PySequence_SetSlice(%s, %s, %s, %s)" % ( self.base.py_result(), self.start_code(), @@ -2653,13 +2653,13 @@ def generate_slice_guard_code(self, code, target_size): target_size, check)) code.putln(code.error_goto(self.pos)) code.putln("}") - + def start_code(self): if self.start: return self.start.result() else: return "0" - + def stop_code(self): if self.stop: return self.stop.result() @@ -2667,11 +2667,11 @@ def stop_code(self): return self.base.type.size else: return "PY_SSIZE_T_MAX" - + def calculate_result_code(self): # self.result() is not used, but this method must exist return "" - + class SliceNode(ExprNode): # start:stop:step in subscript list @@ -2679,7 +2679,7 @@ class SliceNode(ExprNode): # start ExprNode # stop ExprNode # step ExprNode - + type = py_object_type is_temp = 1 @@ -2705,7 +2705,7 @@ def compile_time_value(self, denv): self.compile_time_value_error(e) subexprs = ['start', 'stop', 'step'] - + def analyse_types(self, env): self.start.analyse_types(env) self.stop.analyse_types(env) @@ -2720,8 +2720,8 @@ def generate_result_code(self, code): code.putln( "%s = PySlice_New(%s, %s, %s); %s" % ( self.result(), - self.start.py_result(), - self.stop.py_result(), + self.start.py_result(), + self.stop.py_result(), self.step.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) @@ -2760,7 +2760,7 @@ def analyse_as_type_constructor(self, env): self.function.set_cname(type.declaration_code("")) self.analyse_c_function_call(env) return True - + def is_lvalue(self): return self.type.is_reference @@ -2785,9 +2785,9 @@ class SimpleCallNode(CallNode): # wrapper_call bool used internally # has_optional_args bool used internally # nogil bool used internally - + subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple'] - + self = None coerced_self = None arg_tuple = None @@ -2795,7 +2795,7 @@ class SimpleCallNode(CallNode): has_optional_args = False nogil = False 
analysed = False - + def compile_time_value(self, denv): function = self.function.compile_time_value(denv) args = [arg.compile_time_value(denv) for arg in self.args] @@ -2803,12 +2803,12 @@ def compile_time_value(self, denv): return function(*args) except Exception, e: self.compile_time_value_error(e) - + def type_dependencies(self, env): # TODO: Update when Danilo's C++ code merged in to handle the # the case of function overloading. return self.function.type_dependencies(env) - + def infer_type(self, env): function = self.function func_type = function.infer_type(env) @@ -2903,7 +2903,7 @@ def analyse_types(self, env): # Insert coerced 'self' argument into argument list. self.args.insert(0, self.coerced_self) self.analyse_c_function_call(env) - + def function_type(self): # Return the type of the function being called, coercing a function # pointer to a function if necessary. @@ -2911,7 +2911,7 @@ def function_type(self): if func_type.is_ptr: func_type = func_type.base_type return func_type - + def analyse_c_function_call(self, env): if self.function.type is error_type: self.type = error_type @@ -2955,7 +2955,7 @@ def analyse_c_function_call(self, env): self.args[i] = self.args[i].coerce_to(formal_type, env) for i in range(max_nargs, actual_nargs): if self.args[i].type.is_pyobject: - error(self.args[i].pos, + error(self.args[i].pos, "Python object cannot be passed as a varargs parameter") # Calc result type and code fragment if isinstance(self.function, NewExprNode): @@ -2981,7 +2981,7 @@ def analyse_c_function_call(self, env): def calculate_result_code(self): return self.c_call_code() - + def c_call_code(self): func_type = self.function_type() if self.type is PyrexTypes.error_type or not func_type.is_cfunction: @@ -2995,23 +2995,23 @@ def c_call_code(self): for formal_arg, actual_arg in args[:expected_nargs]: arg_code = actual_arg.result_as(formal_arg.type) arg_list_code.append(arg_code) - + if func_type.is_overridable: arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod))) - + if func_type.optional_arg_count: if expected_nargs == actual_nargs: optional_args = 'NULL' else: optional_args = "&%s" % self.opt_arg_struct arg_list_code.append(optional_args) - + for actual_arg in self.args[len(formal_args):]: arg_list_code.append(actual_arg.result()) result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code)) return result - + def generate_result_code(self, code): func_type = self.function_type() if func_type.is_pyobject: @@ -3050,7 +3050,7 @@ def generate_result_code(self, code): if exc_check: if self.nogil: exc_checks.append("__Pyx_ErrOccurredWithGIL()") - else: + else: exc_checks.append("PyErr_Occurred()") if self.is_temp or exc_checks: rhs = self.c_call_code() @@ -3140,9 +3140,9 @@ class GeneralCallNode(CallNode): # positional_args ExprNode Tuple of positional arguments # keyword_args ExprNode or None Dict of keyword arguments # starstar_arg ExprNode or None Dict of extra keyword args - + type = py_object_type - + subexprs = ['function', 'positional_args', 'keyword_args', 'starstar_arg'] nogil_check = Node.gil_error @@ -3157,7 +3157,7 @@ def compile_time_value(self, denv): return function(*positional_args, **keyword_args) except Exception, e: self.compile_time_value_error(e) - + def explicit_args_kwds(self): if self.starstar_arg or not isinstance(self.positional_args, TupleNode): raise CompileError(self.pos, @@ -3196,14 +3196,14 @@ def analyse_types(self, env): else: self.type = py_object_type self.is_temp = 1 - + def generate_result_code(self, 
code): if self.type.is_error: return kwargs_call_function = "PyEval_CallObjectWithKeywords" if self.keyword_args and self.starstar_arg: - code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, "PyDict_Update(%s, %s)" % ( - self.keyword_args.py_result(), + self.keyword_args.py_result(), self.starstar_arg.py_result())) keyword_code = self.keyword_args.py_result() elif self.keyword_args: @@ -3239,12 +3239,12 @@ class AsTupleNode(ExprNode): # the * argument of a function call. # # arg ExprNode - + subexprs = ['arg'] def calculate_constant_result(self): self.constant_result = tuple(self.base.constant_result) - + def compile_time_value(self, denv): arg = self.arg.compile_time_value(denv) try: @@ -3271,7 +3271,7 @@ def generate_result_code(self, code): self.arg.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + class AttributeNode(ExprNode): # obj.attribute @@ -3287,10 +3287,10 @@ class AttributeNode(ExprNode): # member string C name of struct member # is_called boolean Function call is being done on result # entry Entry Symbol table entry of attribute - + is_attribute = 1 subexprs = ['obj'] - + type = PyrexTypes.error_type entry = None is_called = 0 @@ -3312,7 +3312,7 @@ def coerce_to(self, dst_type, env): # must be a cpdef function self.is_temp = 1 self.entry = entry.as_variable - self.analyse_as_python_attribute(env) + self.analyse_as_python_attribute(env) return self return ExprNode.coerce_to(self, dst_type, env) @@ -3333,10 +3333,10 @@ def compile_time_value(self, denv): return getattr(obj, attr) except Exception, e: self.compile_time_value_error(e) - + def type_dependencies(self, env): return self.obj.type_dependencies(env) - + def infer_type(self, env): if self.analyse_as_cimported_attribute(env, 0): return self.entry.type @@ -3348,17 +3348,17 @@ def infer_type(self, env): def analyse_target_declaration(self, env): pass - + def analyse_target_types(self, env): self.analyse_types(env, target = 1) - + def analyse_types(self, env, target = 0): if self.analyse_as_cimported_attribute(env, target): return if not target and self.analyse_as_unbound_cmethod(env): return self.analyse_as_ordinary_attribute(env, target) - + def analyse_as_cimported_attribute(self, env, target): # Try to interpret this as a reference to an imported # C const, type, var or function. If successful, mutates @@ -3373,7 +3373,7 @@ def analyse_as_cimported_attribute(self, env, target): self.mutate_into_name_node(env, entry, target) return 1 return 0 - + def analyse_as_unbound_cmethod(self, env): # Try to interpret this as a reference to an unbound # C method of an extension type. If successful, mutates @@ -3394,7 +3394,7 @@ def analyse_as_unbound_cmethod(self, env): self.mutate_into_name_node(env, ubcm_entry, None) return 1 return 0 - + def analyse_as_type(self, env): module_scope = self.obj.analyse_as_module(env) if module_scope: @@ -3404,7 +3404,7 @@ def analyse_as_type(self, env): if base_type and hasattr(base_type, 'scope'): return base_type.scope.lookup_type(self.attribute) return None - + def analyse_as_extension_type(self, env): # Try to interpret this as a reference to an extension type # in a cimported module. Returns the extension type, or None. @@ -3414,7 +3414,7 @@ def analyse_as_extension_type(self, env): if entry and entry.is_type and entry.type.is_extension_type: return entry.type return None - + def analyse_as_module(self, env): # Try to interpret this as a reference to a cimported module # in another cimported module. 
Returns the module scope, or None. @@ -3424,7 +3424,7 @@ def analyse_as_module(self, env): if entry and entry.as_module: return entry.as_module return None - + def mutate_into_name_node(self, env, entry, target): # Mutate this node into a NameNode and complete the # analyse_types phase. @@ -3437,7 +3437,7 @@ def mutate_into_name_node(self, env, entry, target): NameNode.analyse_target_types(self, env) else: NameNode.analyse_rvalue_entry(self, env) - + def analyse_as_ordinary_attribute(self, env, target): self.obj.analyse_types(env) self.analyse_attribute(env) @@ -3479,8 +3479,8 @@ def analyse_attribute(self, env, obj_type = None): if entry and entry.is_member: entry = None else: - error(self.pos, - "Cannot select attribute of incomplete type '%s'" + error(self.pos, + "Cannot select attribute of incomplete type '%s'" % obj_type) self.type = PyrexTypes.error_type return @@ -3499,7 +3499,7 @@ def analyse_attribute(self, env, obj_type = None): # method of an extension type, so we treat it like a Python # attribute. pass - # If we get here, the base object is not a struct/union/extension + # If we get here, the base object is not a struct/union/extension # type, or it is an extension type and the attribute is either not # declared or is declared as a Python method. Treat it as a Python # attribute reference. @@ -3536,13 +3536,13 @@ def is_lvalue(self): return 1 else: return NameNode.is_lvalue(self) - + def is_ephemeral(self): if self.obj: return self.obj.is_ephemeral() else: return NameNode.is_ephemeral(self) - + def calculate_result_code(self): #print "AttributeNode.calculate_result_code:", self.member ### #print "...obj node =", self.obj, "code", self.obj.result() ### @@ -3553,7 +3553,7 @@ def calculate_result_code(self): if self.entry and self.entry.is_cmethod: if obj.type.is_extension_type: return "((struct %s *)%s%s%s)->%s" % ( - obj.type.vtabstruct_cname, obj_code, self.op, + obj.type.vtabstruct_cname, obj_code, self.op, obj.type.vtabslot_cname, self.member) else: return self.member @@ -3564,7 +3564,7 @@ def calculate_result_code(self): # accessing a field of a builtin type, need to cast better than result_as() does obj_code = obj.type.cast_code(obj.result(), to_object_struct = True) return "%s%s%s" % (obj_code, self.op, self.member) - + def generate_result_code(self, code): interned_attr_cname = code.intern_identifier(self.attribute) if self.is_py_attr: @@ -3582,12 +3582,12 @@ def generate_result_code(self, code): and self.needs_none_check and code.globalstate.directives['nonecheck']): self.put_nonecheck(code) - + def generate_assignment_code(self, rhs, code): interned_attr_cname = code.intern_identifier(self.attribute) self.obj.generate_evaluation_code(code) if self.is_py_attr: - code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % ( self.obj.py_result(), interned_attr_cname, @@ -3620,7 +3620,7 @@ def generate_assignment_code(self, rhs, code): rhs.free_temps(code) self.obj.generate_disposal_code(code) self.obj.free_temps(code) - + def generate_deletion_code(self, code): interned_attr_cname = code.intern_identifier(self.attribute) self.obj.generate_evaluation_code(code) @@ -3634,7 +3634,7 @@ def generate_deletion_code(self, code): error(self.pos, "Cannot delete C attribute of extension type") self.obj.generate_disposal_code(code) self.obj.free_temps(code) - + def annotate(self, code): if self.is_py_attr: code.annotate(self.pos, AnnotationItem('py_attr', 'python attribute', size=len(self.attribute))) @@ -3707,9 +3707,9 @@ class 
SequenceNode(ExprNode): # iterator ExprNode # unpacked_items [ExprNode] or None # coerced_unpacked_items [ExprNode] or None - + subexprs = ['args'] - + is_sequence_constructor = 1 unpacked_items = None @@ -3766,7 +3766,7 @@ def analyse_target_types(self, env): def generate_result_code(self, code): self.generate_operation_code(code) - + def generate_assignment_code(self, rhs, code): if self.starred_assignment: self.generate_starred_assignment_code(rhs, code) @@ -3788,8 +3788,8 @@ def generate_parallel_assignment_code(self, rhs, code): tuple_check = "PyTuple_CheckExact(%s)" code.putln( "if (%s && likely(PyTuple_GET_SIZE(%s) == %s)) {" % ( - tuple_check % rhs.py_result(), - rhs.py_result(), + tuple_check % rhs.py_result(), + rhs.py_result(), len(self.args))) code.putln("PyObject* tuple = %s;" % rhs.py_result()) for item in self.unpacked_items: @@ -3808,7 +3808,7 @@ def generate_parallel_assignment_code(self, rhs, code): for i in range(len(self.args)): self.args[i].generate_assignment_code( self.coerced_unpacked_items[i], code) - + code.putln("} else {") if rhs.type is tuple_type: @@ -3934,7 +3934,7 @@ def annotate(self, code): class TupleNode(SequenceNode): # Tuple constructor. - + type = tuple_type gil_message = "Constructing Python tuple" @@ -3968,7 +3968,7 @@ def compile_time_value(self, denv): return tuple(values) except Exception, e: self.compile_time_value_error(e) - + def generate_operation_code(self, code): if len(self.args) == 0: # result_code is Naming.empty_tuple @@ -3998,7 +3998,7 @@ def generate_operation_code(self, code): code.put_giveref(arg.py_result()) if self.is_literal: code.put_giveref(self.py_result()) - + def generate_subexpr_disposal_code(self, code): # We call generate_post_assignment_code here instead # of generate_disposal_code, because values were stored @@ -4011,7 +4011,7 @@ def generate_subexpr_disposal_code(self, code): class ListNode(SequenceNode): # List constructor. - + # obj_conversion_errors [PyrexError] used internally # orignial_args [ExprNode] used internally @@ -4019,10 +4019,10 @@ class ListNode(SequenceNode): type = list_type gil_message = "Constructing Python list" - + def type_dependencies(self, env): return () - + def infer_type(self, env): # TOOD: Infer non-object list arrays. return list_type @@ -4037,7 +4037,7 @@ def analyse_types(self, env): SequenceNode.analyse_types(self, env) self.obj_conversion_errors = held_errors() release_errors(ignore=True) - + def coerce_to(self, dst_type, env): if dst_type.is_pyobject: for err in self.obj_conversion_errors: @@ -4068,11 +4068,11 @@ def coerce_to(self, dst_type, env): self.type = error_type error(self.pos, "Cannot coerce list to type '%s'" % dst_type) return self - + def release_temp(self, env): if self.type.is_array: - # To be valid C++, we must allocate the memory on the stack - # manually and be sure not to reuse it for something else. + # To be valid C++, we must allocate the memory on the stack + # manually and be sure not to reuse it for something else. 
pass else: SequenceNode.release_temp(self, env) @@ -4245,7 +4245,7 @@ def may_be_none(self): def calculate_result_code(self): return self.target.result() - + def generate_result_code(self, code): self.generate_operation_code(code) @@ -4263,7 +4263,7 @@ class ComprehensionAppendNode(Node): child_attrs = ['expr'] type = PyrexTypes.c_int_type - + def analyse_expressions(self, env): self.expr.analyse_expressions(env) if not self.expr.type.is_pyobject: @@ -4407,7 +4407,7 @@ class SetNode(ExprNode): subexprs = ['args'] gil_message = "Constructing Python set" - + def analyse_types(self, env): for i in range(len(self.args)): arg = self.args[i] @@ -4454,7 +4454,7 @@ class DictNode(ExprNode): # key_value_pairs [DictItemNode] # # obj_conversion_errors [PyrexError] used internally - + subexprs = ['key_value_pairs'] is_temp = 1 type = dict_type @@ -4464,7 +4464,7 @@ class DictNode(ExprNode): def calculate_constant_result(self): self.constant_result = dict([ item.constant_result for item in self.key_value_pairs]) - + def compile_time_value(self, denv): pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv)) for item in self.key_value_pairs] @@ -4472,10 +4472,10 @@ def compile_time_value(self, denv): return dict(pairs) except Exception, e: self.compile_time_value_error(e) - + def type_dependencies(self, env): return () - + def infer_type(self, env): # TOOD: Infer struct constructors. return dict_type @@ -4489,7 +4489,7 @@ def analyse_types(self, env): def may_be_none(self): return False - + def coerce_to(self, dst_type, env): if dst_type.is_pyobject: self.release_errors() @@ -4521,7 +4521,7 @@ def coerce_to(self, dst_type, env): self.type = error_type error(self.pos, "Cannot interpret dict as type '%s'" % dst_type) return self - + def release_errors(self): for err in self.obj_conversion_errors: report_error(err) @@ -4544,7 +4544,7 @@ def generate_evaluation_code(self, code): for item in self.key_value_pairs: item.generate_evaluation_code(code) if self.type.is_pyobject: - code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % ( self.result(), item.key.py_result(), @@ -4556,11 +4556,11 @@ def generate_evaluation_code(self, code): item.value.result())) item.generate_disposal_code(code) item.free_temps(code) - + def annotate(self, code): for item in self.key_value_pairs: item.annotate(code) - + class DictItemNode(ExprNode): # Represents a single item in a DictNode # @@ -4573,13 +4573,13 @@ class DictItemNode(ExprNode): def calculate_constant_result(self): self.constant_result = ( self.key.constant_result, self.value.constant_result) - + def analyse_types(self, env): self.key.analyse_types(env) self.value.analyse_types(env) self.key = self.key.coerce_to_pyobject(env) self.value = self.value.coerce_to_pyobject(env) - + def generate_evaluation_code(self, code): self.key.generate_evaluation_code(code) self.value.generate_evaluation_code(code) @@ -4591,7 +4591,7 @@ def generate_disposal_code(self, code): def free_temps(self, code): self.key.free_temps(code) self.value.free_temps(code) - + def __iter__(self): return iter([self.key, self.value]) @@ -4613,7 +4613,7 @@ class ClassNode(ExprNode, ModuleNameMixin): # dict ExprNode Class dict (not owned by this node) # doc ExprNode or None Doc string # module_name EncodedString Name of defining module - + subexprs = ['bases', 'doc'] def analyse_types(self, env): @@ -4636,7 +4636,7 @@ def generate_result_code(self, code): cname = code.intern_identifier(self.name) if self.doc: - 
code.put_error_if_neg(self.pos, + code.put_error_if_neg(self.pos, 'PyDict_SetItemString(%s, "__doc__", %s)' % ( self.dict.py_result(), self.doc.py_result())) @@ -4804,9 +4804,9 @@ class BoundMethodNode(ExprNode): # # function ExprNode Function object # self_object ExprNode self object - + subexprs = ['function'] - + def analyse_types(self, env): self.function.analyse_types(env) self.type = py_object_type @@ -4830,12 +4830,12 @@ class UnboundMethodNode(ExprNode): # object from a class and a function. # # function ExprNode Function object - + type = py_object_type is_temp = 1 - + subexprs = ['function'] - + def analyse_types(self, env): self.function.analyse_types(env) @@ -4868,10 +4868,10 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): subexprs = [] self_object = None binding = False - + type = py_object_type is_temp = 1 - + def analyse_types(self, env): if self.binding: env.use_utility_code(binding_cfunc_utility_code) @@ -4881,7 +4881,7 @@ def analyse_types(self, env): def may_be_none(self): return False - + gil_message = "Constructing Python function" def self_result_code(self): @@ -4990,14 +4990,14 @@ class UnopNode(ExprNode): # - Check operand type and coerce if needed. # - Determine result type and result code fragment. # - Allocate temporary for result if needed. - + subexprs = ['operand'] infix = True def calculate_constant_result(self): func = compile_time_unary_operators[self.operator] self.constant_result = func(self.operand.constant_result) - + def compile_time_value(self, denv): func = compile_time_unary_operators.get(self.operator) if not func: @@ -5009,7 +5009,7 @@ def compile_time_value(self, denv): return func(operand) except Exception, e: self.compile_time_value_error(e) - + def infer_type(self, env): operand_type = self.operand.infer_type(env) if operand_type.is_pyobject: @@ -5027,10 +5027,10 @@ def analyse_types(self, env): self.analyse_cpp_operation(env) else: self.analyse_c_operation(env) - + def check_const(self): return self.operand.check_const() - + def is_py_operation(self): return self.operand.type.is_pyobject @@ -5041,24 +5041,24 @@ def nogil_check(self, env): def is_cpp_operation(self): type = self.operand.type return type.is_cpp_class - + def coerce_operand_to_pyobject(self, env): self.operand = self.operand.coerce_to_pyobject(env) - + def generate_result_code(self, code): if self.operand.type.is_pyobject: self.generate_py_operation_code(code) - + def generate_py_operation_code(self, code): function = self.py_operation_function() code.putln( "%s = %s(%s); %s" % ( - self.result(), - function, + self.result(), + function, self.operand.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + def type_error(self): if not self.operand.type.is_error: error(self.pos, "Invalid operand type for '%s' (%s)" % @@ -5085,11 +5085,11 @@ class NotNode(ExprNode): # 'not' operator # # operand ExprNode - + type = PyrexTypes.c_bint_type subexprs = ['operand'] - + def calculate_constant_result(self): self.constant_result = not self.operand.constant_result @@ -5102,29 +5102,29 @@ def compile_time_value(self, denv): def infer_type(self, env): return PyrexTypes.c_bint_type - + def analyse_types(self, env): self.operand.analyse_types(env) self.operand = self.operand.coerce_to_boolean(env) - + def calculate_result_code(self): return "(!%s)" % self.operand.result() - + def generate_result_code(self, code): pass class UnaryPlusNode(UnopNode): # unary '+' operator - + operator = '+' - + def analyse_c_operation(self, env): self.type = 
self.operand.type - + def py_operation_function(self): return "PyNumber_Positive" - + def calculate_result_code(self): if self.is_cpp_operation(): return "(+%s)" % self.operand.result() @@ -5134,9 +5134,9 @@ def calculate_result_code(self): class UnaryMinusNode(UnopNode): # unary '-' operator - + operator = '-' - + def analyse_c_operation(self, env): if self.operand.type.is_numeric: self.type = self.operand.type @@ -5144,10 +5144,10 @@ def analyse_c_operation(self, env): self.type_error() if self.type.is_complex: self.infix = False - + def py_operation_function(self): return "PyNumber_Negative" - + def calculate_result_code(self): if self.infix: return "(-%s)" % self.operand.result() @@ -5170,7 +5170,7 @@ def analyse_c_operation(self, env): def py_operation_function(self): return "PyNumber_Invert" - + def calculate_result_code(self): return "(~%s)" % self.operand.result() @@ -5184,7 +5184,7 @@ class DereferenceNode(CUnopNode): # unary * operator operator = '*' - + def analyse_c_operation(self, env): if self.operand.type.is_ptr: self.type = self.operand.type.base_type @@ -5197,7 +5197,7 @@ def calculate_result_code(self): class DecrementIncrementNode(CUnopNode): # unary ++/-- operator - + def analyse_c_operation(self, env): if self.operand.type.is_ptr or self.operand.type.is_numeric: self.type = self.operand.type @@ -5218,9 +5218,9 @@ class AmpersandNode(ExprNode): # The C address-of operator. # # operand ExprNode - + subexprs = ['operand'] - + def infer_type(self, env): return PyrexTypes.c_ptr_type(self.operand.infer_type(env)) @@ -5234,21 +5234,21 @@ def analyse_types(self, env): self.error("Cannot take address of Python variable") return self.type = PyrexTypes.c_ptr_type(argtype) - + def check_const(self): return self.operand.check_const_addr() - + def error(self, mess): error(self.pos, mess) self.type = PyrexTypes.error_type self.result_code = "" - + def calculate_result_code(self): return "(&%s)" % self.operand.result() def generate_result_code(self, code): pass - + unop_node_classes = { "+": UnaryPlusNode, @@ -5257,14 +5257,14 @@ def generate_result_code(self, code): } def unop_node(pos, operator, operand): - # Construct unnop node of appropriate class for + # Construct unnop node of appropriate class for # given operator. if isinstance(operand, IntNode) and operator == '-': return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value))) elif isinstance(operand, UnopNode) and operand.operator == operator: warning(pos, "Python has no increment/decrement operator: %s%sx = %s(%sx) = x" % ((operator,)*4), 5) - return unop_node_classes[operator](pos, - operator = operator, + return unop_node_classes[operator](pos, + operator = operator, operand = operand) @@ -5277,19 +5277,19 @@ class TypecastNode(ExprNode): # # If used from a transform, one can if wanted specify the attribute # "type" directly and leave base_type and declarator to None - + subexprs = ['operand'] base_type = declarator = type = None - + def type_dependencies(self, env): return () - + def infer_type(self, env): if self.type is None: base_type = self.base_type.analyse(env) _, self.type = self.declarator.analyse(base_type, env) return self.type - + def analyse_types(self, env): if self.type is None: base_type = self.base_type.analyse(env) @@ -5317,7 +5317,7 @@ def analyse_types(self, env): if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct): error(self.pos, "Python objects cannot be cast from pointers of primitive types") else: - # Should this be an error? 
+ # Should this be an error? warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type)) self.operand = self.operand.coerce_to_simple(env) elif from_py and not to_py: @@ -5345,7 +5345,7 @@ def calculate_constant_result(self): # we usually do not know the result of a type cast at code # generation time pass - + def calculate_result_code(self): if self.type.is_complex: operand_result = self.operand.result() @@ -5358,15 +5358,15 @@ def calculate_result_code(self): return "%s(%s, %s)" % ( self.type.from_parts, real_part, - imag_part) + imag_part) else: return self.type.cast_code(self.operand.result()) - + def get_constant_c_result_code(self): operand_result = self.operand.get_constant_c_result_code() if operand_result: return self.type.cast_code(operand_result) - + def result_as(self, type): if self.type.is_pyobject and not self.is_temp: # Optimise away some unnecessary casting @@ -5385,7 +5385,7 @@ def generate_result_code(self, code): class SizeofNode(ExprNode): # Abstract base class for sizeof(x) expression nodes. - + type = PyrexTypes.c_size_t_type def check_const(self): @@ -5400,10 +5400,10 @@ class SizeofTypeNode(SizeofNode): # # base_type CBaseTypeNode # declarator CDeclaratorNode - + subexprs = [] arg_type = None - + def analyse_types(self, env): # we may have incorrectly interpreted a dotted name as a type rather than an attribute # this could be better handled by more uniformly treating types as runtime-available objects @@ -5424,7 +5424,7 @@ def analyse_types(self, env): _, arg_type = self.declarator.analyse(base_type, env) self.arg_type = arg_type self.check_type() - + def check_type(self): arg_type = self.arg_type if arg_type.is_pyobject and not arg_type.is_extension_type: @@ -5433,7 +5433,7 @@ def check_type(self): error(self.pos, "Cannot take sizeof void") elif not arg_type.is_complete(): error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type) - + def calculate_result_code(self): if self.arg_type.is_extension_type: # the size of the pointer is boring @@ -5442,15 +5442,15 @@ def calculate_result_code(self): else: arg_code = self.arg_type.declaration_code("") return "(sizeof(%s))" % arg_code - + class SizeofVarNode(SizeofNode): # C sizeof function applied to a variable # # operand ExprNode - + subexprs = ['operand'] - + def analyse_types(self, env): # We may actually be looking at a type rather than a variable... # If we are, traditional analysis would fail... @@ -5461,10 +5461,10 @@ def analyse_types(self, env): self.check_type() else: self.operand.analyse_types(env) - + def calculate_result_code(self): return "(sizeof(%s))" % self.operand.result() - + def generate_result_code(self, code): pass @@ -5473,12 +5473,12 @@ class TypeofNode(ExprNode): # # operand ExprNode # literal StringNode # internal - + literal = None type = py_object_type - + subexprs = ['literal'] # 'operand' will be ignored after type analysis! - + def analyse_types(self, env): self.operand.analyse_types(env) self.literal = StringNode( @@ -5491,7 +5491,7 @@ def may_be_none(self): def generate_evaluation_code(self, code): self.literal.generate_evaluation_code(code) - + def calculate_result_code(self): return self.literal.calculate_result_code() @@ -5549,7 +5549,7 @@ class BinopNode(ExprNode): # - Check operand types and coerce if needed. # - Determine result type and result code fragment. # - Allocate temporary for result if needed. 
- + subexprs = ['operand1', 'operand2'] inplace = False @@ -5567,16 +5567,16 @@ def compile_time_value(self, denv): return func(operand1, operand2) except Exception, e: self.compile_time_value_error(e) - + def infer_type(self, env): return self.result_type(self.operand1.infer_type(env), self.operand2.infer_type(env)) - + def analyse_types(self, env): self.operand1.analyse_types(env) self.operand2.analyse_types(env) self.analyse_operation(env) - + def analyse_operation(self, env): if self.is_py_operation(): self.coerce_operands_to_pyobjects(env) @@ -5588,17 +5588,17 @@ def analyse_operation(self, env): self.analyse_cpp_operation(env) else: self.analyse_c_operation(env) - + def is_py_operation(self): return self.is_py_operation_types(self.operand1.type, self.operand2.type) - + def is_py_operation_types(self, type1, type2): return type1.is_pyobject or type2.is_pyobject def is_cpp_operation(self): return (self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class) - + def analyse_cpp_operation(self, env): type1 = self.operand1.type type2 = self.operand2.type @@ -5615,7 +5615,7 @@ def analyse_cpp_operation(self, env): self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env) self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env) self.type = func_type.return_type - + def result_type(self, type1, type2): if self.is_py_operation_types(type1, type2): if type2.is_string: @@ -5649,14 +5649,14 @@ def result_type(self, type1, type2): def nogil_check(self, env): if self.is_py_operation(): self.gil_error() - + def coerce_operands_to_pyobjects(self, env): self.operand1 = self.operand1.coerce_to_pyobject(env) self.operand2 = self.operand2.coerce_to_pyobject(env) - + def check_const(self): return self.operand1.check_const() and self.operand2.check_const() - + def generate_result_code(self, code): #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ### if self.operand1.type.is_pyobject: @@ -5667,37 +5667,37 @@ def generate_result_code(self, code): extra_args = "" code.putln( "%s = %s(%s, %s%s); %s" % ( - self.result(), - function, + self.result(), + function, self.operand1.py_result(), self.operand2.py_result(), extra_args, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + def type_error(self): if not (self.operand1.type.is_error or self.operand2.type.is_error): error(self.pos, "Invalid operand types for '%s' (%s; %s)" % - (self.operator, self.operand1.type, + (self.operator, self.operand1.type, self.operand2.type)) self.type = PyrexTypes.error_type class CBinopNode(BinopNode): - + def analyse_types(self, env): BinopNode.analyse_types(self, env) if self.is_py_operation(): self.type = PyrexTypes.error_type - + def py_operation_function(): return "" - + def calculate_result_code(self): return "(%s %s %s)" % ( - self.operand1.result(), - self.operator, + self.operand1.result(), + self.operator, self.operand2.result()) @@ -5708,9 +5708,9 @@ def make_binop_node(pos, **operands): class NumBinopNode(BinopNode): # Binary operation taking numeric arguments. 
- + infix = True - + def analyse_c_operation(self, env): type1 = self.operand1.type type2 = self.operand2.type @@ -5723,7 +5723,7 @@ def analyse_c_operation(self, env): if not self.infix or (type1.is_numeric and type2.is_numeric): self.operand1 = self.operand1.coerce_to(self.type, env) self.operand2 = self.operand2.coerce_to(self.type, env) - + def compute_c_result_type(self, type1, type2): if self.c_types_okay(type1, type2): widest_type = PyrexTypes.widest_numeric_type(type1, type2) @@ -5742,7 +5742,7 @@ def get_constant_c_result_code(self): return "(%s %s %s)" % (value1, self.operator, value2) else: return None - + def c_types_okay(self, type1, type2): #print "NumBinopNode.c_types_okay:", type1, type2 ### return (type1.is_numeric or type1.is_enum) \ @@ -5751,8 +5751,8 @@ def c_types_okay(self, type1, type2): def calculate_result_code(self): if self.infix: return "(%s %s %s)" % ( - self.operand1.result(), - self.operator, + self.operand1.result(), + self.operator, self.operand2.result()) else: func = self.type.binary_op(self.operator) @@ -5762,12 +5762,12 @@ def calculate_result_code(self): func, self.operand1.result(), self.operand2.result()) - + def is_py_operation_types(self, type1, type2): return (type1 is PyrexTypes.c_py_unicode_type or type2 is PyrexTypes.c_py_unicode_type or BinopNode.is_py_operation_types(self, type1, type2)) - + def py_operation_function(self): fuction = self.py_functions[self.operator] if self.inplace: @@ -5791,16 +5791,16 @@ def py_operation_function(self): class IntBinopNode(NumBinopNode): # Binary operation taking integer arguments. - + def c_types_okay(self, type1, type2): #print "IntBinopNode.c_types_okay:", type1, type2 ### return (type1.is_int or type1.is_enum) \ and (type2.is_int or type2.is_enum) - + class AddNode(NumBinopNode): # '+' operator. - + def is_py_operation_types(self, type1, type2): if type1.is_string and type2.is_string: return 1 @@ -5820,7 +5820,7 @@ def compute_c_result_type(self, type1, type2): class SubNode(NumBinopNode): # '-' operator. - + def compute_c_result_type(self, type1, type2): if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum): return type1 @@ -5833,7 +5833,7 @@ def compute_c_result_type(self, type1, type2): class MulNode(NumBinopNode): # '*' operator. - + def is_py_operation_types(self, type1, type2): if (type1.is_string and type2.is_int) \ or (type2.is_string and type1.is_int): @@ -5844,7 +5844,7 @@ def is_py_operation_types(self, type1, type2): class DivNode(NumBinopNode): # '/' or '//' operator. 
- + cdivision = None truedivision = None # == "unknown" if operator == '/' ctruedivision = False @@ -5914,14 +5914,14 @@ def zero_division_message(self): def generate_evaluation_code(self, code): if not self.type.is_pyobject and not self.type.is_complex: if self.cdivision is None: - self.cdivision = (code.globalstate.directives['cdivision'] + self.cdivision = (code.globalstate.directives['cdivision'] or not self.type.signed or self.type.is_float) if not self.cdivision: code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type)) NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) - + def generate_div_warning_code(self, code): if not self.type.is_pyobject: if self.zerodivision_check: @@ -5936,7 +5936,7 @@ def generate_div_warning_code(self, code): if self.type.is_int and self.type.signed and self.operator != '%': code.globalstate.use_utility_code(division_overflow_test_code) code.putln("else if (sizeof(%s) == sizeof(long) && unlikely(%s == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % ( - self.type.declaration_code(''), + self.type.declaration_code(''), self.operand2.result(), self.operand1.result())) code.putln('PyErr_Format(PyExc_OverflowError, "value too large to perform division");') @@ -5951,7 +5951,7 @@ def generate_div_warning_code(self, code): code.put("if (__Pyx_cdivision_warning()) ") code.put_goto(code.error_label) code.putln("}") - + def calculate_result_code(self): if self.type.is_complex: return NumBinopNode.calculate_result_code(self) @@ -5971,7 +5971,7 @@ def calculate_result_code(self): else: return "__Pyx_div_%s(%s, %s)" % ( self.type.specialization_name(), - self.operand1.result(), + self.operand1.result(), self.operand2.result()) @@ -5988,7 +5988,7 @@ def zero_division_message(self): return "integer division or modulo by zero" else: return "float divmod()" - + def generate_evaluation_code(self, code): if not self.type.is_pyobject: if self.cdivision is None: @@ -6001,27 +6001,27 @@ def generate_evaluation_code(self, code): mod_float_utility_code.specialize(self.type, math_h_modifier=self.type.math_h_modifier)) NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) - + def calculate_result_code(self): if self.cdivision: if self.type.is_float: return "fmod%s(%s, %s)" % ( self.type.math_h_modifier, - self.operand1.result(), + self.operand1.result(), self.operand2.result()) else: return "(%s %% %s)" % ( - self.operand1.result(), + self.operand1.result(), self.operand2.result()) else: return "__Pyx_mod_%s(%s, %s)" % ( self.type.specialization_name(), - self.operand1.result(), + self.operand1.result(), self.operand2.result()) class PowNode(NumBinopNode): # '**' operator. 
- + def analyse_c_operation(self, env): NumBinopNode.analyse_c_operation(self, env) if self.type.is_complex: @@ -6037,7 +6037,7 @@ def analyse_c_operation(self, env): else: self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_') env.use_utility_code( - int_pow_utility_code.specialize(func_name=self.pow_func, + int_pow_utility_code.specialize(func_name=self.pow_func, type=self.type.declaration_code(''))) def calculate_result_code(self): @@ -6048,8 +6048,8 @@ def typecast(operand): else: return self.type.cast_code(operand.result()) return "%s(%s, %s)" % ( - self.pow_func, - typecast(self.operand1), + self.pow_func, + typecast(self.operand1), typecast(self.operand2)) @@ -6065,9 +6065,9 @@ class BoolBinopNode(ExprNode): # operator string # operand1 ExprNode # operand2 ExprNode - + subexprs = ['operand1', 'operand2'] - + def infer_type(self, env): type1 = self.operand1.infer_type(env) type2 = self.operand2.infer_type(env) @@ -6088,7 +6088,7 @@ def calculate_constant_result(self): self.constant_result = \ self.operand1.constant_result or \ self.operand2.constant_result - + def compile_time_value(self, denv): if self.operator == 'and': return self.operand1.compile_time_value(denv) \ @@ -6096,7 +6096,7 @@ def compile_time_value(self, denv): else: return self.operand1.compile_time_value(denv) \ or self.operand2.compile_time_value(denv) - + def coerce_to_boolean(self, env): return BoolBinopNode( self.pos, @@ -6112,7 +6112,7 @@ def analyse_types(self, env): self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type) self.operand1 = self.operand1.coerce_to(self.type, env) self.operand2 = self.operand2.coerce_to(self.type, env) - + # For what we're about to do, it's vital that # both operands be temp nodes. self.operand1 = self.operand1.coerce_to_simple(env) @@ -6123,7 +6123,7 @@ def analyse_types(self, env): def check_const(self): return self.operand1.check_const() and self.operand2.check_const() - + def generate_evaluation_code(self, code): code.mark_pos(self.pos) self.operand1.generate_evaluation_code(code) @@ -6151,7 +6151,7 @@ def generate_evaluation_code(self, code): self.operand1.generate_post_assignment_code(code) self.operand1.free_temps(code) code.putln("}") - + def generate_operand1_test(self, code): # Generate code to test the truth of the first operand. 
if self.type.is_pyobject: @@ -6173,15 +6173,15 @@ class CondExprNode(ExprNode): # test ExprNode # true_val ExprNode # false_val ExprNode - + true_val = None false_val = None - + subexprs = ['test', 'true_val', 'false_val'] - + def type_dependencies(self, env): return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env) - + def infer_type(self, env): return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env), self.false_val.infer_type(env)) @@ -6204,22 +6204,22 @@ def analyse_types(self, env): self.is_temp = 1 if self.type == PyrexTypes.error_type: self.type_error() - + def type_error(self): if not (self.true_val.type.is_error or self.false_val.type.is_error): error(self.pos, "Incompatable types in conditional expression (%s; %s)" % (self.true_val.type, self.false_val.type)) self.type = PyrexTypes.error_type - + def check_const(self): - return (self.test.check_const() + return (self.test.check_const() and self.true_val.check_const() and self.false_val.check_const()) - + def generate_evaluation_code(self, code): # Because subexprs may not be evaluated we can use a more optimal # subexpr allocation strategy than the default, so override evaluation_code. - + code.mark_pos(self.pos) self.allocate_temp_result(code) self.test.generate_evaluation_code(code) @@ -6424,7 +6424,7 @@ def is_c_string_contains(self): and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or (self.operand1.type is PyrexTypes.c_py_unicode_type and self.operand2.type is unicode_type)) - + def is_ptr_contains(self): if self.operator in ('in', 'not_in'): container_type = self.operand2.type @@ -6441,15 +6441,15 @@ def find_special_bool_compare_function(self, env): return True return False - def generate_operation_code(self, code, result_code, + def generate_operation_code(self, code, result_code, operand1, op , operand2): if self.type.is_pyobject: coerce_result = "__Pyx_PyBool_FromLong" else: coerce_result = "" - if 'not' in op: + if 'not' in op: negation = "!" - else: + else: negation = "" if self.special_bool_cmp_function: if operand1.type.is_pyobject: @@ -6489,30 +6489,30 @@ def generate_operation_code(self, code, result_code, coerce_result, negation, method, - operand2.py_result(), - operand1.py_result(), + operand2.py_result(), + operand1.py_result(), got_ref, error_clause(result_code, self.pos))) elif (operand1.type.is_pyobject and op not in ('is', 'is_not')): code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s" % ( - result_code, - operand1.py_result(), - operand2.py_result(), + result_code, + operand1.py_result(), + operand2.py_result(), richcmp_constants[op], code.error_goto_if_null(result_code, self.pos))) code.put_gotref(result_code) elif operand1.type.is_complex: - if op == "!=": + if op == "!=": negation = "!" 
- else: + else: negation = "" code.putln("%s = %s(%s%s(%s, %s));" % ( - result_code, + result_code, coerce_result, negation, - operand1.type.unary_op('eq'), - operand1.result(), + operand1.type.unary_op('eq'), + operand1.result(), operand2.result())) else: type1 = operand1.type @@ -6527,10 +6527,10 @@ def generate_operation_code(self, code, result_code, code1 = operand1.result_as(common_type) code2 = operand2.result_as(common_type) code.putln("%s = %s(%s %s %s);" % ( - result_code, - coerce_result, - code1, - self.c_operator(op), + result_code, + coerce_result, + code1, + self.c_operator(op), code2)) def c_operator(self, op): @@ -6540,7 +6540,7 @@ def c_operator(self, op): return "!=" else: return op - + contains_utility_code = UtilityCode( proto=""" static CYTHON_INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; } @@ -6630,14 +6630,14 @@ class PrimaryCmpNode(ExprNode, CmpNode): # operand1 ExprNode # operand2 ExprNode # cascade CascadedCmpNode - + # We don't use the subexprs mechanism, because # things here are too complicated for it to handle. # Instead, we override all the framework methods # which use it. - + child_attrs = ['operand1', 'operand2', 'cascade'] - + cascade = None def infer_type(self, env): @@ -6649,7 +6649,7 @@ def type_dependencies(self, env): def calculate_constant_result(self): self.calculate_cascaded_constant_result(self.operand1.constant_result) - + def compile_time_value(self, denv): operand1 = self.operand1.compile_time_value(denv) return self.cascaded_compile_time_value(operand1, denv) @@ -6719,7 +6719,7 @@ def analyse_types(self, env): cdr = cdr.cascade if self.is_pycmp or self.cascade: self.is_temp = 1 - + def analyse_cpp_comparison(self, env): type1 = self.operand1.type type2 = self.operand2.type @@ -6739,11 +6739,11 @@ def analyse_cpp_comparison(self, env): self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env) self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env) self.type = func_type.return_type - + def has_python_operands(self): return (self.operand1.type.is_pyobject or self.operand2.type.is_pyobject) - + def check_const(self): if self.cascade: self.not_const() @@ -6759,8 +6759,8 @@ def calculate_result_code(self): negation = "" return "(%s%s(%s, %s))" % ( negation, - self.operand1.type.binary_op('=='), - self.operand1.result(), + self.operand1.type.binary_op('=='), + self.operand1.result(), self.operand2.result()) elif self.is_c_string_contains(): if self.operand2.type is bytes_type: @@ -6774,7 +6774,7 @@ def calculate_result_code(self): return "(%s%s(%s, %s))" % ( negation, method, - self.operand2.result(), + self.operand2.result(), self.operand1.result()) else: return "(%s %s %s)" % ( @@ -6787,7 +6787,7 @@ def generate_evaluation_code(self, code): self.operand2.generate_evaluation_code(code) if self.is_temp: self.allocate_temp_result(code) - self.generate_operation_code(code, self.result(), + self.generate_operation_code(code, self.result(), self.operand1, self.operator, self.operand2) if self.cascade: self.cascade.generate_evaluation_code(code, @@ -6802,13 +6802,13 @@ def generate_subexpr_disposal_code(self, code): # so only need to dispose of the two main operands. self.operand1.generate_disposal_code(code) self.operand2.generate_disposal_code(code) - + def free_subexpr_temps(self, code): # If this is called, it is a non-cascaded cmp, # so only need to dispose of the two main operands. 
self.operand1.free_temps(code) self.operand2.free_temps(code) - + def annotate(self, code): self.operand1.annotate(code) self.operand2.annotate(code) @@ -6817,9 +6817,9 @@ def annotate(self, code): class CascadedCmpNode(Node, CmpNode): - # A CascadedCmpNode is not a complete expression node. It - # hangs off the side of another comparison node, shares - # its left operand with that node, and shares its result + # A CascadedCmpNode is not a complete expression node. It + # hangs off the side of another comparison node, shares + # its left operand with that node, and shares its result # with the PrimaryCmpNode at the head of the chain. # # operator string @@ -6849,7 +6849,7 @@ def analyse_types(self, env): def has_python_operands(self): return self.operand2.type.is_pyobject - + def coerce_operands_to_pyobjects(self, env): self.operand2 = self.operand2.coerce_to_pyobject(env) if self.operand2.type is dict_type and self.operator in ('in', 'not_in'): @@ -6862,7 +6862,7 @@ def coerce_cascaded_operands_to_temp(self, env): #self.operand2 = self.operand2.coerce_to_temp(env) #CTT self.operand2 = self.operand2.coerce_to_simple(env) self.cascade.coerce_cascaded_operands_to_temp(env) - + def generate_evaluation_code(self, code, result, operand1): if self.type.is_pyobject: code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result) @@ -6870,7 +6870,7 @@ def generate_evaluation_code(self, code, result, operand1): else: code.putln("if (%s) {" % result) self.operand2.generate_evaluation_code(code) - self.generate_operation_code(code, result, + self.generate_operation_code(code, result, operand1, self.operator, self.operand2) if self.cascade: self.cascade.generate_evaluation_code( @@ -6904,11 +6904,11 @@ def annotate(self, code): } def binop_node(pos, operator, operand1, operand2, inplace=False): - # Construct binop node of appropriate class for + # Construct binop node of appropriate class for # given operator. - return binop_node_classes[operator](pos, - operator = operator, - operand1 = operand1, + return binop_node_classes[operator](pos, + operator = operator, + operand1 = operand1, operand2 = operand2, inplace = inplace) @@ -6927,10 +6927,10 @@ class CoercionNode(ExprNode): # Abstract base class for coercion nodes. # # arg ExprNode node being coerced - + subexprs = ['arg'] constant_result = not_a_constant - + def __init__(self, arg): self.pos = arg.pos self.arg = arg @@ -6940,7 +6940,7 @@ def __init__(self, arg): def calculate_constant_result(self): # constant folding can break type coercion, so this is disabled pass - + def annotate(self, code): self.arg.annotate(code) if self.arg.type != self.type: @@ -6950,14 +6950,14 @@ def annotate(self, code): class CastNode(CoercionNode): # Wrap a node in a C type cast. 
- + def __init__(self, arg, new_type): CoercionNode.__init__(self, arg) self.type = new_type def may_be_none(self): return self.arg.may_be_none() - + def calculate_result_code(self): return self.arg.result_as(self.type) @@ -6981,7 +6981,7 @@ def __init__(self, arg, dst_type, env, notnone=False): nogil_check = Node.gil_error gil_message = "Python type test" - + def analyse_types(self, env): pass @@ -6989,10 +6989,10 @@ def may_be_none(self): if self.notnone: return False return self.arg.may_be_none() - + def result_in_temp(self): return self.arg.result_in_temp() - + def is_ephemeral(self): return self.arg.is_ephemeral() @@ -7002,7 +7002,7 @@ def calculate_constant_result(self): def calculate_result_code(self): return self.arg.result() - + def generate_result_code(self, code): if self.type.typeobj_is_available(): if not self.type.is_builtin_type: @@ -7014,7 +7014,7 @@ def generate_result_code(self, code): else: error(self.pos, "Cannot test type of extern C class " "without type object name specification") - + def generate_post_assignment_code(self, code): self.arg.generate_post_assignment_code(code) @@ -7045,7 +7045,7 @@ def result_in_temp(self): def calculate_result_code(self): return self.arg.result() - + def generate_result_code(self, code): code.putln( "if (unlikely(%s == Py_None)) {" % self.arg.result()) @@ -7066,7 +7066,7 @@ def free_temps(self, code): class CoerceToPyTypeNode(CoercionNode): # This node is used to convert a C data type # to a Python object. - + type = py_object_type is_temp = 1 @@ -7100,7 +7100,7 @@ def coerce_to_boolean(self, env): return self.arg.coerce_to_temp(env) else: return CoerceToBooleanNode(self, env) - + def coerce_to_integer(self, env): # If not already some C integer type, coerce to longint. if self.arg.type.is_int: @@ -7115,9 +7115,9 @@ def analyse_types(self, env): def generate_result_code(self, code): function = self.arg.type.to_py_function code.putln('%s = %s(%s); %s' % ( - self.result(), - function, - self.arg.result(), + self.result(), + function, + self.arg.result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) @@ -7176,7 +7176,7 @@ def __init__(self, result_type, arg, env): if self.type.is_string and self.arg.is_ephemeral(): error(arg.pos, "Obtaining char * from temporary Python value") - + def analyse_types(self, env): # The arg is always already analysed pass @@ -7188,7 +7188,7 @@ def generate_result_code(self, code): if self.type.is_enum: rhs = typecast(self.type, c_long_type, rhs) code.putln('%s = %s; %s' % ( - self.result(), + self.result(), rhs, code.error_goto_if(self.type.error_condition(self.result()), self.pos))) if self.type.is_pyobject: @@ -7198,7 +7198,7 @@ def generate_result_code(self, code): class CoerceToBooleanNode(CoercionNode): # This node is used when a result needs to be used # in a boolean context. 
- + type = PyrexTypes.c_bint_type _special_builtins = { @@ -7218,13 +7218,13 @@ def nogil_check(self, env): self.gil_error() gil_message = "Truth-testing Python object" - + def check_const(self): if self.is_temp: self.not_const() return False return self.arg.check_const() - + def calculate_result_code(self): return "(%s != 0)" % self.arg.result() @@ -7241,8 +7241,8 @@ def generate_result_code(self, code): else: code.putln( "%s = __Pyx_PyObject_IsTrue(%s); %s" % ( - self.result(), - self.arg.py_result(), + self.result(), + self.arg.py_result(), code.error_goto_if_neg(self.result(), self.pos))) class CoerceToComplexNode(CoercionNode): @@ -7265,7 +7265,7 @@ def calculate_result_code(self): self.type.from_parts, real_part, imag_part) - + def generate_result_code(self, code): pass @@ -7287,7 +7287,7 @@ def __init__(self, arg, env): def analyse_types(self, env): # The arg is always already analysed pass - + def coerce_to_boolean(self, env): self.arg = self.arg.coerce_to_boolean(env) if self.arg.is_simple(): @@ -7310,12 +7310,12 @@ class CloneNode(CoercionNode): # to be used multiple times. The argument node's result must # be in a temporary. This node "borrows" the result from the # argument node, and does not generate any evaluation or - # disposal code for it. The original owner of the argument + # disposal code for it. The original owner of the argument # node is responsible for doing those things. - + subexprs = [] # Arg is not considered a subexpr nogil_check = None - + def __init__(self, arg): CoercionNode.__init__(self, arg) if hasattr(arg, 'type'): @@ -7323,13 +7323,13 @@ def __init__(self, arg): self.result_ctype = arg.result_ctype if hasattr(arg, 'entry'): self.entry = arg.entry - + def result(self): return self.arg.result() - + def type_dependencies(self, env): return self.arg.type_dependencies(env) - + def infer_type(self, env): return self.arg.infer_type(env) @@ -7339,27 +7339,27 @@ def analyse_types(self, env): self.is_temp = 1 if hasattr(self.arg, 'entry'): self.entry = self.arg.entry - + def generate_evaluation_code(self, code): pass def generate_result_code(self, code): pass - + def generate_disposal_code(self, code): pass - + def free_temps(self, code): pass class ModuleRefNode(ExprNode): # Simple returns the module object - + type = py_object_type is_temp = False subexprs = [] - + def analyse_types(self, env): pass @@ -7374,11 +7374,11 @@ def generate_result_code(self, code): class DocstringRefNode(ExprNode): # Extracts the docstring of the body element - + subexprs = ['body'] type = py_object_type is_temp = True - + def __init__(self, pos, body): ExprNode.__init__(self, pos) assert body.type.is_pyobject @@ -7802,7 +7802,7 @@ def generate_result_code(self, code): #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif -""", +""", requires = [raise_noneindex_error_utility_code]) #------------------------------------------------------------------------------------ @@ -8001,7 +8001,7 @@ def generate_result_code(self, code): tuple_unpacking_error_code = UtilityCode( proto = """ static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ -""", +""", impl = """ static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { @@ -8012,7 +8012,7 @@ def generate_result_code(self, code): __Pyx_RaiseTooManyValuesError(index); } } -""", +""", requires = [raise_none_iter_error_utility_code, raise_need_more_values_to_unpack, raise_too_many_values_to_unpack] @@ -8075,7 +8075,7 @@ def generate_result_code(self, code): return result; /* 
may be NULL */ } } -""", +""", ) @@ -8154,9 +8154,9 @@ def generate_result_code(self, code): """, impl=""" static int __Pyx_cdivision_warning(void) { - return PyErr_WarnExplicit(PyExc_RuntimeWarning, + return PyErr_WarnExplicit(PyExc_RuntimeWarning, "division with oppositely signed operands, C and Python semantics differ", - %(FILENAME)s, + %(FILENAME)s, %(LINENO)s, __Pyx_MODULE_NAME, NULL); diff --git a/Cython/Compiler/Interpreter.py b/Cython/Compiler/Interpreter.py index 298041d012a..83cb184f96b 100644 --- a/Cython/Compiler/Interpreter.py +++ b/Cython/Compiler/Interpreter.py @@ -14,7 +14,7 @@ class EmptyScope(object): def lookup(self, name): return None - + empty_scope = EmptyScope() def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()): @@ -45,7 +45,7 @@ def interpret(node, ix): raise CompileError(node.pos, "Type not allowed here.") else: return (node.compile_time_value(empty_scope), node.pos) - + if optlist: optlist = [interpret(x, ix) for ix, x in enumerate(optlist)] if optdict: diff --git a/Cython/Compiler/Lexicon.py b/Cython/Compiler/Lexicon.py index f31e5be53f4..ad736df130f 100644 --- a/Cython/Compiler/Lexicon.py +++ b/Cython/Compiler/Lexicon.py @@ -19,12 +19,12 @@ def make_lexicon(): octdigit = Any("01234567") hexdigit = Any("0123456789ABCDEFabcdef") indentation = Bol + Rep(Any(" \t")) - + decimal = Rep1(digit) dot = Str(".") exponent = Any("Ee") + Opt(Any("+-")) + decimal decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal) - + name = letter + Rep(letter | digit) intconst = decimal | (Str("0") + ((Any("Xx") + Rep1(hexdigit)) | (Any("Oo") + Rep1(octdigit)) | @@ -33,33 +33,33 @@ def make_lexicon(): intliteral = intconst + intsuffix fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent) imagconst = (intconst | fltconst) + Any("jJ") - + sq_string = ( - Str("'") + - Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) + + Str("'") + + Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) + Str("'") ) - + dq_string = ( - Str('"') + - Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) + + Str('"') + + Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) + Str('"') ) - + non_sq = AnyBut("'") | (Str('\\') + AnyChar) tsq_string = ( Str("'''") - + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq)) + + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq)) + Str("'''") ) - + non_dq = AnyBut('"') | (Str('\\') + AnyChar) tdq_string = ( Str('"""') - + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq)) + + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq)) + Str('"""') ) - + beginstring = Opt(Any(string_prefixes)) + Opt(Any(raw_prefixes)) + (Str("'") | Str('"') | Str("'''") | Str('"""')) two_oct = octdigit + octdigit three_oct = octdigit + octdigit + octdigit @@ -68,21 +68,21 @@ def make_lexicon(): escapeseq = Str("\\") + (two_oct | three_oct | Str('u') + four_hex | Str('x') + two_hex | Str('U') + four_hex + four_hex | AnyChar) - + deco = Str("@") bra = Any("([{") ket = Any(")]}") punct = Any(":,;+-*/|&<>=.%`~^?") diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//", - "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=", + "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=", "<<=", ">>=", "**=", "//=", "->") spaces = Rep1(Any(" \t\f")) escaped_newline = Str("\\\n") lineterm = Eol + Opt(Str("\n")) - comment = Str("#") + Rep(AnyBut("\n")) - + comment = Str("#") + Rep(AnyBut("\n")) + return Lexicon([ (name, IDENT), (intliteral, 'INT'), @@ -90,25 +90,25 @@ def make_lexicon(): (imagconst, 'IMAG'), (deco, 'DECORATOR'), (punct | diphthong, TEXT), - + (bra, 
Method('open_bracket_action')), (ket, Method('close_bracket_action')), (lineterm, Method('newline_action')), - + #(stringlit, 'STRING'), (beginstring, Method('begin_string_action')), - + (comment, IGNORE), (spaces, IGNORE), (escaped_newline, IGNORE), - + State('INDENT', [ (comment + lineterm, Method('commentline')), (Opt(spaces) + Opt(comment) + lineterm, IGNORE), (indentation, Method('indentation_action')), (Eof, Method('eof_action')) ]), - + State('SQ_STRING', [ (escapeseq, 'ESCAPE'), (Rep1(AnyBut("'\"\n\\")), 'CHARS'), @@ -117,7 +117,7 @@ def make_lexicon(): (Str("'"), Method('end_string_action')), (Eof, 'EOF') ]), - + State('DQ_STRING', [ (escapeseq, 'ESCAPE'), (Rep1(AnyBut('"\n\\')), 'CHARS'), @@ -126,7 +126,7 @@ def make_lexicon(): (Str('"'), Method('end_string_action')), (Eof, 'EOF') ]), - + State('TSQ_STRING', [ (escapeseq, 'ESCAPE'), (Rep1(AnyBut("'\"\n\\")), 'CHARS'), @@ -135,7 +135,7 @@ def make_lexicon(): (Str("'''"), Method('end_string_action')), (Eof, 'EOF') ]), - + State('TDQ_STRING', [ (escapeseq, 'ESCAPE'), (Rep1(AnyBut('"\'\n\\')), 'CHARS'), @@ -144,10 +144,10 @@ def make_lexicon(): (Str('"""'), Method('end_string_action')), (Eof, 'EOF') ]), - + (Eof, Method('eof_action')) ], - + # FIXME: Plex 1.9 needs different args here from Plex 1.1.4 #debug_flags = scanner_debug_flags, #debug_file = scanner_dump_file diff --git a/Cython/Compiler/Main.py b/Cython/Compiler/Main.py index 13b0b49eabd..3088db34484 100644 --- a/Cython/Compiler/Main.py +++ b/Cython/Compiler/Main.py @@ -68,7 +68,7 @@ class Context(object): # include_directories [string] # future_directives [object] # language_level int currently 2 or 3 for Python 2/3 - + def __init__(self, include_directories, compiler_directives, cpp=False, language_level=2): #self.modules = {"__builtin__" : BuiltinScope()} import Builtin, CythonScope @@ -86,7 +86,7 @@ def __init__(self, include_directories, compiler_directives, cpp=False, language self.include_directories = include_directories + [standard_include_path] self.set_language_level(language_level) - + self.gdb_debug_outputwriter = None def set_language_level(self, level): @@ -120,12 +120,12 @@ def create_pipeline(self, pxd, py=False): else: _check_c_declarations = check_c_declarations _specific_post_parse = None - + if py and not pxd: _align_function_definitions = AlignFunctionDefinitions(self) else: _align_function_definitions = None - + return [ NormalizeTree(self), PostParse(self), @@ -190,7 +190,7 @@ def inject_pxd_code(module_node): debug_transform = [DebugTransform(self, options, result)] else: debug_transform = [] - + return list(itertools.chain( [create_parse(self)], self.create_pipeline(pxd=False, py=py), @@ -214,7 +214,7 @@ def parse_pxd(source_desc): return [parse_pxd] + self.create_pipeline(pxd=True) + [ ExtractPxdCode(self), ] - + def create_py_pipeline(self, options, result): return self.create_pyx_pipeline(options, result, py=True) @@ -223,7 +223,7 @@ def process_pxd(self, source_desc, scope, module_name): pipeline = self.create_pxd_pipeline(scope, module_name) result = self.run_pipeline(pipeline, source_desc) return result - + def nonfatal_error(self, exc): return Errors.report_error(exc) @@ -253,7 +253,7 @@ def run_pipeline(self, pipeline, source): error = err return (error, data) - def find_module(self, module_name, + def find_module(self, module_name, relative_to = None, pos = None, need_pxd = 1): # Finds and returns the module scope corresponding to # the given relative or absolute module name. 
If this @@ -320,7 +320,7 @@ def find_module(self, module_name, except CompileError: pass return scope - + def find_pxd_file(self, qualified_name, pos): # Search include path for the .pxd file corresponding to the # given fully-qualified module name. @@ -355,7 +355,7 @@ def find_pyx_file(self, qualified_name, pos): # Search include path for the .pyx file corresponding to the # given fully-qualified module name, as for find_pxd_file(). return self.search_include_directories(qualified_name, ".pyx", pos) - + def find_include_file(self, filename, pos): # Search list of include directories for filename. # Reports an error and returns None if not found. @@ -364,7 +364,7 @@ def find_include_file(self, filename, pos): if not path: error(pos, "'%s' not found" % filename) return path - + def search_include_directories(self, qualified_name, suffix, pos, include=False): # Search the list of include directories for the given @@ -445,15 +445,15 @@ def c_file_out_of_date(self, source_path): if dep_path and Utils.file_newer_than(dep_path, c_time): return 1 return 0 - + def find_cimported_module_names(self, source_path): return [ name for kind, name in self.read_dependency_file(source_path) if kind == "cimport" ] def is_package_dir(self, dir_path): # Return true if the given directory is a package directory. - for filename in ("__init__.py", - "__init__.pyx", + for filename in ("__init__.py", + "__init__.pyx", "__init__.pxd"): path = os.path.join(dir_path, filename) if Utils.path_exists(path): @@ -479,7 +479,7 @@ def find_submodule(self, name): # Find a top-level module, creating a new one if needed. scope = self.lookup_submodule(name) if not scope: - scope = ModuleScope(name, + scope = ModuleScope(name, parent_module = None, context = self) self.modules[name] = scope return scope @@ -590,7 +590,7 @@ def run_pipeline(source, options, full_module_name = None): # Set up result object result = create_default_resultobj(source, options) - + # Get pipeline if source_desc.filename.endswith(".py"): pipeline = context.create_py_pipeline(options, result) @@ -601,7 +601,7 @@ def run_pipeline(source, options, full_module_name = None): err, enddata = context.run_pipeline(pipeline, source) context.teardown_errors(err, options, result) return result - + #------------------------------------------------------------------------ # @@ -622,7 +622,7 @@ def __init__(self, source_desc, full_module_name, cwd): class CompilationOptions(object): """ Options to the Cython compiler: - + show_version boolean Display version number use_listing_file boolean Generate a .lis file errors_to_stderr boolean Echo errors to stderr when using .lis @@ -637,10 +637,10 @@ class CompilationOptions(object): compiler_directives dict Overrides for pragma options (see Options.py) evaluate_tree_assertions boolean Test support: evaluate parse tree assertions language_level integer The Python language level: 2 or 3 - + cplus boolean Compile as c++ code """ - + def __init__(self, defaults = None, **kw): self.include_path = [] if defaults: @@ -659,7 +659,7 @@ def create_context(self): class CompilationResult(object): """ Results from the Cython compiler: - + c_file string or None The generated C source file h_file string or None The generated C header file i_file string or None The generated .pxi file @@ -670,7 +670,7 @@ class CompilationResult(object): num_errors integer Number of compilation errors compilation_source CompilationSource """ - + def __init__(self): self.c_file = None self.h_file = None @@ -687,10 +687,10 @@ class CompilationResultSet(dict): 
Results from compiling multiple Pyrex source files. A mapping from source file paths to CompilationResult instances. Also has the following attributes: - + num_errors integer Total number of compilation errors """ - + num_errors = 0 def add(self, source, result): @@ -701,7 +701,7 @@ def add(self, source, result): def compile_single(source, options, full_module_name = None): """ compile_single(source, options, full_module_name) - + Compile the given Pyrex implementation file and return a CompilationResult. Always compiles a single file; does not perform timestamp checking or recursion. @@ -712,7 +712,7 @@ def compile_single(source, options, full_module_name = None): def compile_multiple(sources, options): """ compile_multiple(sources, options) - + Compiles the given sequence of Pyrex implementation files and returns a CompilationResultSet. Performs timestamp checking and/or recursion if these are specified in the options. @@ -750,7 +750,7 @@ def compile_multiple(sources, options): def compile(source, options = None, full_module_name = None, **kwds): """ compile(source [, options], [,