diff options
author | Armin Rigo <arigo@tunes.org> | 2005-01-23 11:01:26 +0000 |
---|---|---|
committer | Armin Rigo <arigo@tunes.org> | 2005-01-23 11:01:26 +0000 |
commit | 8dec98500265ce72ba7ccdf03ca51e33539b1f87 (patch) | |
tree | bee56f3f1ebd32eb10d3b9a85f6b8a77dbe6752c | |
parent | finally get rid of the 'xpy' dependency in favor (diff) | |
download | pypy-8dec98500265ce72ba7ccdf03ca51e33539b1f87.tar.gz pypy-8dec98500265ce72ba7ccdf03ca51e33539b1f87.tar.bz2 pypy-8dec98500265ce72ba7ccdf03ca51e33539b1f87.zip |
Merge, step 3: delete the head!
343 files changed, 0 insertions, 52574 deletions
diff --git a/pypy/TODO b/pypy/TODO deleted file mode 100644 index 763bcb3c7e..0000000000 --- a/pypy/TODO +++ /dev/null @@ -1,87 +0,0 @@ -General -======= - -* missing pieces currently borrowed from CPython: the bytecode - compiler, a number of C modules, and some types -- as printed - when running py.py: "faking <type xyz>". - -* not all of CPython's exceptions use the same __init__! - -* refactor the cmdline-entrypoints to PyPy aka py.py main.py - interactive.py and traceinteractive.py towards a unified - cmdline tool (py.py) possibly including translator-related - things too. - -* cleanup the contents of the tool directory - -* an oldie: move interpreter/unittest_w.py somewhere more appropriate - and give it a better name. - -* (documentation) remove/retire all web-pages referencing e.g. - AnnSpace or other deprecated stuff - -* Review, enhance the new mixed-level module mechanism - (e.g. the module/builtin*.py files comprise the builtin module) - and try to apply the same technique for the application level - type definitions (types.py). Think about a way to split up - especially app-level definitions of __builtin__ into multiple - files. (like allowing __builtin__ to be a directory with files - comprising the complete definition of the builtin module etc.pp) - -* review whatever you like - -StdObjSpace -=========== - -* find a way via hacking or elegance to run CPython's unit tests - against the StdObjSpace. - -* String formatting is agonizingly slow. - -* Provide an importer that can import packages. (we have a - limited __import__ builtin defined) <---- done now? - - Consider PEP 302, new import hooks. - Try to write as much as possible in app-level. - How would PyPy import CPython extensions? - -* (documentation) generate a nice dot-graph from the structure of PyPy - -* port pypy's testing framework to py.test (probably a sprint topic, - as some discussion how to do it is required) - -* clear out and do a clean implementation of multimethod delegation. 
- The idea is to give 'kinds' to arguments according to their use, - e.g. 'numeric argument' or 'object whose identity is preserved'. - A 'kind' encapsulates a set of delegation functions. Armin has - already written documentation for the envisioned new multimethod - implementation: - - http://codespeak.net/pypy/index.cgi?doc/objspace/multimethod - - (now irrelevant? not sure... (nowadays, we have the descriptor - mechanism implemented right before EuroPython 2004 for both the - trivial and the stdobjspace)) - -Translator -========== - -* enhance the translator components to accept more of PyPy ... - -Tools -===== - -* add web server thread (!) that allows inspection of e.g. - interpreter-level tracebacks (and stop clogging the user's terminal - with them). --> there is some preliminary working code in - tool/tb_server - -* Interpreter level tracebacks are spectacular in their uselessness, - especially since the advent of descroperation. It should be - possible to construct something like an app-level traceback even - when there's no OperationError. - -* improve traceobjectspace! currently it only catches - calls from outside the space into the space but not - the ones where space operations involve calling more space - operations (the latter are not traced)! (fixed?) diff --git a/pypy/__init__.py b/pypy/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/annotation/__init__.py b/pypy/annotation/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/pypy/annotation/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py deleted file mode 100644 index f810a97642..0000000000 --- a/pypy/annotation/binaryop.py +++ /dev/null @@ -1,310 +0,0 @@ -""" -Binary operations between SomeValues. 
-""" - -from pypy.annotation.pairtype import pair, pairtype -from pypy.annotation.model import SomeObject, SomeInteger, SomeBool -from pypy.annotation.model import SomeString, SomeChar, SomeList, SomeDict -from pypy.annotation.model import SomeTuple, SomeImpossibleValue -from pypy.annotation.model import SomeInstance, SomeBuiltin, SomeIterator -from pypy.annotation.model import SomePBC -from pypy.annotation.model import unionof, set, setunion, missing_operation -from pypy.annotation.factory import generalize -from pypy.annotation.bookkeeper import getbookkeeper -from pypy.annotation.classdef import isclassdef -from pypy.objspace.flow.model import Constant - -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'and_', 'or_', 'xor', - 'getitem', 'setitem', - 'inplace_add', 'inplace_sub', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', - 'union']) - -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), opname) - -#class __extend__(pairtype(SomeFunction, SomeObject)): -# def union((obj1, obj2)): -# raise TypeError, "generalizing not allowed: %r AND %r" % (obj1, obj2) -# -#class __extend__(pairtype(SomeObject, SomeFunction)): -# def union((obj1, obj2)): -# raise TypeError, "generalizing not allowed: %r AND %r" % (obj2, obj1) - -class __extend__(pairtype(SomeObject, SomeObject)): - - def union((obj1, obj2)): - if obj1 == obj2: - return obj1 - else: - #if isinstance(obj1, SomeFunction) or \ - # isinstance(obj2, SomeFunction): - # raise TypeError, ("generalizing not allowed:" - # "%r AND %r" % (obj1, obj2)) - # - result = SomeObject() - # try to preserve the origin of SomeObjects - if obj1 == result: - return obj1 - elif obj2 == result: - return obj2 - else: - return result - - def inplace_add((obj1, obj2)): - return pair(obj1, obj2).add() # default - - def inplace_sub((obj1, obj2)): - return 
pair(obj1, obj2).sub() # default - - def lt((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const < obj2.const) - else: - return SomeBool() - - def le((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const <= obj2.const) - else: - return SomeBool() - - def eq((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const == obj2.const) - else: - return SomeBool() - - def ne((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const != obj2.const) - else: - return SomeBool() - - def gt((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const > obj2.const) - else: - return SomeBool() - - def ge((obj1, obj2)): - if obj1.is_constant() and obj2.is_constant(): - return immutablevalue(obj1.const >= obj2.const) - else: - return SomeBool() - - def is_((obj1, obj2)): - # XXX assumption: for "X is Y" we for simplification - # assume that X is possibly variable and Y constant - # (and not the other way round) - r = SomeBool() - if obj2.is_constant(): - if obj1.is_constant(): - r.const = obj1.const is obj2.const - # XXX HACK HACK HACK - # XXX HACK HACK HACK - # XXX HACK HACK HACK - fn, block, i = getbookkeeper().position_key - annotator = getbookkeeper().annotator - op = block.operations[i] - assert op.opname == "is_" - assert len(op.args) == 2 - assert annotator.binding(op.args[0]) == obj1 - r.knowntypedata = (op.args[0], obj2) - return r - -class __extend__(pairtype(SomeInteger, SomeInteger)): - # unsignedness is considered a rare and contagious disease - - def union((int1, int2)): - return SomeInteger(nonneg = int1.nonneg and int2.nonneg, - unsigned = int1.unsigned or int2.unsigned) - - add = mul = div = mod = or_ = xor = union - - def sub((int1, int2)): - return SomeInteger(unsigned = int1.unsigned or int2.unsigned) - - def and_((int1, int2)): - return SomeInteger(nonneg = 
int1.nonneg or int1.nonneg, - unsigned = int1.unsigned or int2.unsigned) - - -class __extend__(pairtype(SomeBool, SomeBool)): - - def union((boo1, boo2)): - s = SomeBool() - if getattr(boo1, 'const', -1) == getattr(boo2, 'const', -2): - s.const = boo1.const - if hasattr(boo1, 'knowntypedata') and \ - hasattr(boo2, 'knowntypedata') and \ - boo1.knowntypedata[0] == boo2.knowntypedata[0]: - s.knowntypedata = ( - boo1.knowntypedata[0], - unionof(boo1.knowntypedata[1], boo2.knowntypedata[1])) - return s - -class __extend__(pairtype(SomeString, SomeString)): - - def union((str1, str2)): - return SomeString() - - def add((str1, str2)): - return SomeString() - - -class __extend__(pairtype(SomeString, SomeObject)): - - def mod((str, args)): - return SomeString() - - -class __extend__(pairtype(SomeList, SomeList)): - - def union((lst1, lst2)): - return SomeList(setunion(lst1.factories, lst2.factories), - s_item = unionof(lst1.s_item, lst2.s_item)) - - add = union - - -class __extend__(pairtype(SomeList, SomeObject)): - - def inplace_add((lst1, obj2)): - s_iter = obj2.iter() - pair(lst1, SomeInteger()).setitem(s_iter.next()) - return lst1 - - -class __extend__(pairtype(SomeTuple, SomeTuple)): - - def union((tup1, tup2)): - if len(tup1.items) != len(tup2.items): - return SomeObject() - else: - unions = [unionof(x,y) for x,y in zip(tup1.items, tup2.items)] - return SomeTuple(items = unions) - - def add((tup1, tup2)): - return SomeTuple(items = tup1.items + tup2.items) - - -class __extend__(pairtype(SomeDict, SomeDict)): - - def union((dic1, dic2)): - return SomeDict(setunion(dic1.factories, dic2.factories), - unionof(dic1.s_key, dic2.s_key), - unionof(dic1.s_value, dic2.s_value)) - - -class __extend__(pairtype(SomeDict, SomeObject)): - - def getitem((dic1, obj2)): - return dic1.s_value - - def setitem((dic1, obj2), s_value): - generalize(dic1.factories, obj2, s_value) - - -class __extend__(pairtype(SomeTuple, SomeInteger)): - - def getitem((tup1, int2)): - if 
int2.is_constant(): - try: - return tup1.items[int2.const] - except IndexError: - return SomeImpossibleValue() - else: - return unionof(*tup1.items) - - -class __extend__(pairtype(SomeList, SomeInteger)): - - def mul((lst1, int2)): - return lst1 - - def getitem((lst1, int2)): - return lst1.s_item - - def setitem((lst1, int2), s_value): - generalize(lst1.factories, s_value) - - -class __extend__(pairtype(SomeString, SomeInteger)): - - def getitem((str1, int2)): - return SomeChar() - - -class __extend__(pairtype(SomeInteger, SomeList)): - - def mul((int1, lst2)): - return lst2 - - -class __extend__(pairtype(SomeInstance, SomeInstance)): - - def union((ins1, ins2)): - basedef = ins1.classdef.commonbase(ins2.classdef) - if basedef is None: - # print warning? - return SomeObject() - return SomeInstance(basedef) - -class __extend__(pairtype(SomeIterator, SomeIterator)): - - def union((iter1, iter2)): - return SomeIterator(unionof(iter1.s_item, iter2.s_item)) - - -class __extend__(pairtype(SomeBuiltin, SomeBuiltin)): - - def union((bltn1, bltn2)): - if bltn1.analyser != bltn2.analyser: - assert False, "merging incompatible builtins == BAD!" 
- return SomeObject() - else: - s_self = unionof(bltn1.s_self, bltn2.s_self) - return SomeBuiltin(bltn1.analyser, s_self) - -class __extend__(pairtype(SomePBC, SomePBC)): - def union((pbc1, pbc2)): - if len(pbc2.prebuiltinstances) > len(pbc1.prebuiltinstances): - pbc1, pbc2 = pbc2, pbc1 - d = pbc1.prebuiltinstances.copy() - for x, classdef in pbc2.prebuiltinstances.items(): - if x in d: - if bool(isclassdef(classdef)) ^ bool(isclassdef(d[x])): - raise Exception( - "union failed for %r with classdefs %r and %r" % - (x, classdef, d[x])) - if isclassdef(classdef): - classdef = classdef.commonbase(d[x]) - d[x] = classdef - return SomePBC(d) - -class __extend__(pairtype(SomeImpossibleValue, SomeObject)): - def union((imp1, obj2)): - return obj2 - -class __extend__(pairtype(SomeObject, SomeImpossibleValue)): - def union((obj1, imp2)): - return obj1 - -class __extend__(pairtype(SomeInstance, SomePBC)): - def union((ins, pbc)): - classdef = ins.classdef.superdef_containing(pbc.knowntype) - if classdef is None: - # print warning? - return SomeObject() - return SomeInstance(classdef) - -class __extend__(pairtype(SomePBC, SomeInstance)): - def union((pbc, ins)): - return pair(ins, pbc).union() diff --git a/pypy/annotation/bookkeeper.py b/pypy/annotation/bookkeeper.py deleted file mode 100644 index fe9f6d41f8..0000000000 --- a/pypy/annotation/bookkeeper.py +++ /dev/null @@ -1,279 +0,0 @@ -""" -The Bookkeeper class. -""" - -from types import FunctionType, ClassType, MethodType -from types import BuiltinMethodType -from pypy.annotation.model import * -from pypy.annotation.classdef import ClassDef -from pypy.interpreter.miscutils import getthreadlocals -from pypy.tool.hack import func_with_new_name -from pypy.interpreter.pycode import CO_VARARGS - -class Bookkeeper: - """The log of choices that have been made while analysing the operations. - It ensures that the same 'choice objects' will be returned if we ask - again during reflowing. 
Like ExecutionContext, there is an implicit - Bookkeeper that can be obtained from a thread-local variable. - - Currently used for factories and user-defined classes.""" - - def __init__(self, annotator): - self.annotator = annotator - self.creationpoints = {} # map position-in-a-block to its Factory - self.userclasses = {} # map classes to ClassDefs - self.userclasseslist = []# userclasses.keys() in creation order - self.cachespecializations = {} - self.pbccache = {} - # import ordering hack - global BUILTIN_ANALYZERS - from pypy.annotation.builtin import BUILTIN_ANALYZERS - - def enter(self, position_key): - """Start of an operation. - The operation is uniquely identified by the given key.""" - self.position_key = position_key - getthreadlocals().bookkeeper = self - - def leave(self): - """End of an operation.""" - del getthreadlocals().bookkeeper - del self.position_key - - def is_in_an_operation(self): - return hasattr(self, 'position_key') - - def getfactory(self, factorycls): - """Get the Factory associated with the current position, - or build it if it doesn't exist yet.""" - try: - factory = self.creationpoints[self.position_key] - except KeyError: - factory = factorycls() - factory.bookkeeper = self - factory.position_key = self.position_key - self.creationpoints[self.position_key] = factory - assert isinstance(factory, factorycls) - return factory - - def getclassdef(self, cls): - """Get the ClassDef associated with the given user cls.""" - if cls is object: - return None - try: - return self.userclasses[cls] - except KeyError: - cdef = ClassDef(cls, self) - self.userclasses[cls] = cdef - self.userclasseslist.append(cdef) - return self.userclasses[cls] - - - def immutablevalue(self, x): - """The most precise SomeValue instance that contains the - immutable value x.""" - tp = type(x) - if tp is bool: - result = SomeBool() - elif tp is int: - result = SomeInteger(nonneg = x>=0) - elif tp is str: - result = SomeString() - elif tp is tuple: - result = 
SomeTuple(items = [self.immutablevalue(e) for e in x]) - elif tp is list: - items_s = [self.immutablevalue(e) for e in x] - result = SomeList({}, unionof(*items_s)) - elif tp is dict: # exactly a dict - keys_s = [self.immutablevalue(e) for e in x.keys()] - values_s = [self.immutablevalue(e) for e in x.values()] - result = SomeDict({}, unionof(*keys_s), unionof(*values_s)) - elif ishashable(x) and x in BUILTIN_ANALYZERS: - result = SomeBuiltin(BUILTIN_ANALYZERS[x]) - elif callable(x) or isinstance(x, staticmethod): # XXX - # maybe 'x' is a method bound to a not-yet-frozen cache? - # fun fun fun. - if (hasattr(x, 'im_self') and isinstance(x.im_self, Cache) - and not x.im_self.frozen): - x.im_self.freeze() - if hasattr(x, '__self__') and x.__self__ is not None: - s_self = self.immutablevalue(x.__self__) - try: - result = s_self.find_method(x.__name__) - except AttributeError: - result = SomeObject() - else: - return self.getpbc(x) - elif hasattr(x, '__class__') \ - and x.__class__.__module__ != '__builtin__': - if isinstance(x, Cache) and not x.frozen: - x.freeze() - return self.getpbc(x) - elif x is None: - return self.getpbc(None) - else: - result = SomeObject() - result.const = x - return result - - def getpbc(self, x): - try: - # this is not just an optimization, but needed to avoid - # infinitely repeated calls to add_source_for_attribute() - return self.pbccache[x] - except KeyError: - result = SomePBC({x: True}) # pre-built inst - clsdef = self.getclassdef(new_or_old_class(x)) - for attr in getattr(x, '__dict__', {}): - clsdef.add_source_for_attribute(attr, x) - self.pbccache[x] = result - return result - - def valueoftype(self, t): - """The most precise SomeValue instance that contains all - objects of type t.""" - if t is bool: - return SomeBool() - elif t is int: - return SomeInteger() - elif t is str: - return SomeString() - elif t is list: - return SomeList(factories={}) - # can't do dict, tuple - elif isinstance(t, (type, ClassType)) and \ - t.__module__ 
!= '__builtin__': - classdef = self.getclassdef(t) - if self.is_in_an_operation(): - # woha! instantiating a "mutable" SomeXxx like - # SomeInstance is always dangerous, because we need to - # allow reflowing from the current operation if/when - # the classdef later changes. - classdef.instantiation_locations[self.position_key] = True - return SomeInstance(classdef) - else: - o = SomeObject() - o.knowntype = t - return o - - def pycall(self, func, args): - if func is None: # consider None as a NULL function pointer - return SomeImpossibleValue() - if isinstance(func, (type, ClassType)) and \ - func.__module__ != '__builtin__': - cls = func - x = getattr(cls, "_specialize_", False) - if x: - if x == "location": - cls = self.specialize_by_key(cls, self.position_key) - else: - raise Exception, \ - "unsupported specialization type '%s'"%(x,) - - classdef = self.getclassdef(cls) - classdef.instantiation_locations[self.position_key] = True - s_instance = SomeInstance(classdef) - # flow into __init__() if the class has got one - init = getattr(cls, '__init__', None) - if init is not None and init != object.__init__: - # don't record the access of __init__ on the classdef - # because it is not a dynamic attribute look-up, but - # merely a static function call - if hasattr(init, 'im_func'): - init = init.im_func - else: - assert isinstance(init, BuiltinMethodType) - s_init = self.immutablevalue(init) - s_init.call(args.prepend(s_instance)) - else: - try: - args.fixedunpack(0) - except ValueError: - raise Exception, "no __init__ found in %r" % (cls,) - return s_instance - if hasattr(func, '__call__') and \ - isinstance(func.__call__, MethodType): - func = func.__call__ - if hasattr(func, 'im_func'): - if func.im_self is not None: - s_self = self.immutablevalue(func.im_self) - args = args.prepend(s_self) - # for debugging only, but useful to keep anyway: - try: - func.im_func.class_ = func.im_class - except AttributeError: - # probably a builtin function, we don't care to 
preserve - # class information then - pass - func = func.im_func - assert isinstance(func, FunctionType), "expected function, got %r"%func - # do we need to specialize this function in several versions? - x = getattr(func, '_specialize_', False) - #if not x: - # x = 'argtypes' - if x: - if x == 'argtypes': - key = short_type_name(args) - func = self.specialize_by_key(func, key, - func.__name__+'__'+key) - elif x == "location": - # fully specialize: create one version per call position - func = self.specialize_by_key(func, self.position_key) - else: - raise Exception, "unsupported specialization type '%s'"%(x,) - - elif func.func_code.co_flags & CO_VARARGS: - # calls to *arg functions: create one version per number of args - assert not args.kwds_w, ( - "keyword forbidden in calls to *arg functions") - nbargs = len(args.arguments_w) - if args.w_stararg is not None: - s_len = args.w_stararg.len() - assert s_len.is_constant(), "calls require known number of args" - nbargs += s_len.const - func = self.specialize_by_key(func, nbargs, - name='%s__%d' % (func.func_name, - nbargs)) - return self.annotator.recursivecall(func, self.position_key, args) - - def specialize_by_key(self, thing, key, name=None): - key = thing, key - try: - thing = self.cachespecializations[key] - except KeyError: - if isinstance(thing, FunctionType): - # XXX XXX XXX HAAAAAAAAAAAACK - self.annotator.translator.getflowgraph(thing) - thing = func_with_new_name(thing, name or thing.func_name) - elif isinstance(thing, (type, ClassType)): - assert not "not working yet" - thing = type(thing)(name or thing.__name__, (thing,)) - else: - raise Exception, "specializing %r?? why??"%thing - self.cachespecializations[key] = thing - return thing - - -def getbookkeeper(): - """Get the current Bookkeeper. 
- Only works during the analysis of an operation.""" - return getthreadlocals().bookkeeper - -def ishashable(x): - try: - hash(x) - except TypeError: - return False - else: - return True - -def short_type_name(args): - l = [] - shape, args_w = args.flatten() - for x in args_w: - if isinstance(x, SomeInstance) and hasattr(x, 'knowntype'): - name = "SI_" + x.knowntype.__name__ - else: - name = x.__class__.__name__ - l.append(name) - return "__".join(l) diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py deleted file mode 100644 index 201415d017..0000000000 --- a/pypy/annotation/builtin.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Built-in functions. -""" - -import types -from pypy.annotation.model import SomeInteger, SomeObject, SomeChar, SomeBool -from pypy.annotation.model import SomeList, SomeString, SomeTuple -from pypy.annotation.bookkeeper import getbookkeeper -from pypy.annotation.factory import ListFactory -from pypy.objspace.flow.model import Constant -import pypy.objspace.std.restricted_int - -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - -def builtin_len(s_obj): - return s_obj.len() - -def builtin_range(*args): - factory = getbookkeeper().getfactory(ListFactory) - factory.generalize(SomeInteger()) # XXX nonneg=... 
- return factory.create() - -def builtin_pow(s_base, s_exponent, *args): - if s_base.knowntype is s_exponent.knowntype is int: - return SomeInteger() - else: - return SomeObject() - -def builtin_int(s_obj): # we can consider 'int' as a function - return SomeInteger() - -def restricted_uint(s_obj): # for r_uint - return SomeInteger(nonneg=True, unsigned=True) - -def builtin_chr(s_int): - return SomeChar() - -def builtin_unicode(s_obj): - return SomeString() - -def builtin_float(s_obj): - return SomeObject() - -def builtin_long(s_str): - return SomeObject() - -def our_issubclass(cls1, cls2): - """ we're going to try to be less silly in the face of old-style classes""" - return cls2 is object or issubclass(cls1, cls2) - -def builtin_isinstance(s_obj, s_type): - s = SomeBool() - if s_type.is_constant(): - typ = s_type.const - # XXX bit of a hack: - if issubclass(typ, (int, long)): - typ = int - if s_obj.is_constant(): - s.const = isinstance(s_obj.const, typ) - elif our_issubclass(s_obj.knowntype, typ): - s.const = True - elif not our_issubclass(typ, s_obj.knowntype): - s.const = False - # XXX HACK HACK HACK - # XXX HACK HACK HACK - # XXX HACK HACK HACK - bk = getbookkeeper() - fn, block, i = bk.position_key - annotator = bk.annotator - op = block.operations[i] - assert op.opname == "simple_call" - assert len(op.args) == 3 - assert op.args[0] == Constant(isinstance) - assert annotator.binding(op.args[1]) == s_obj - s.knowntypedata = (op.args[1], bk.valueoftype(typ)) - return s - -def builtin_issubclass(s_cls1, s_cls2): - if s_cls1.is_constant() and s_cls2.is_constant(): - return immutablevalue(issubclass(s_cls1.const, s_cls2.const)) - else: - return SomeBool() - -def builtin_getattr(s_obj, s_attr, s_default=None): - if not s_attr.is_constant() or not isinstance(s_attr.const, str): - print "UN-RPYTHONIC-WARNING", \ - 'getattr(%r, %r) is not RPythonic enough' % (s_obj, s_attr) - return SomeObject() - return s_obj.getattr(s_attr) - -def builtin_hasattr(s_obj, s_attr): - if 
not s_attr.is_constant() or not isinstance(s_attr.const, str): - print "UN-RPYTHONIC-WARNING", \ - 'hasattr(%r, %r) is not RPythonic enough' % (s_obj, s_attr) - return SomeBool() - -def builtin_hash(s_obj): - return SomeInteger() - -def builtin_callable(s_obj): - return SomeBool() - -def builtin_tuple(s_iterable): - if isinstance(s_iterable, SomeTuple): - return s_iterable - return SomeObject() - -def builtin_type(s_obj, *moreargs): - if moreargs: - raise Exception, 'type() called with more than one argument' - if s_obj.is_constant(): - return immutablevalue(type(s_obj.const)) - return SomeObject() - -def builtin_str(s_obj): - return SomeString() - -def builtin_list(s_iterable): - factory = getbookkeeper().getfactory(ListFactory) - s_iter = s_iterable.iter() - factory.generalize(s_iter.next()) - return factory.create() - -def builtin_apply(*stuff): - print "XXX ignoring apply%r" % (stuff,) - return SomeObject() - -def builtin_compile(*stuff): - s = SomeObject() - s.knowntype = types.CodeType - return s - -def exception_init(s_self, *args): - s_self.setattr(immutablevalue('args'), SomeTuple(args)) - -# collect all functions -import __builtin__ -BUILTIN_ANALYZERS = {} -for name, value in globals().items(): - if name.startswith('builtin_'): - original = getattr(__builtin__, name[8:]) - BUILTIN_ANALYZERS[original] = value - -BUILTIN_ANALYZERS[pypy.objspace.std.restricted_int.r_int] = builtin_int -BUILTIN_ANALYZERS[pypy.objspace.std.restricted_int.r_uint] = restricted_uint -BUILTIN_ANALYZERS[Exception.__init__.im_func] = exception_init - diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py deleted file mode 100644 index 47ca62e013..0000000000 --- a/pypy/annotation/classdef.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -Type inference for user-defined classes. 
-""" - -from __future__ import generators -from types import FunctionType -from pypy.annotation.model import SomeImpossibleValue, unionof - - -class Attribute: - # readonly-ness - # SomeThing-ness - # more potential sources (pbcs or classes) of information - - def __init__(self, name, bookkeeper): - self.name = name - self.bookkeeper = bookkeeper - self.sources = {} # source -> None or ClassDef - # XXX a SomeImpossibleValue() constant? later!! - self.s_value = SomeImpossibleValue() - self.readonly = True - - def getvalue(self): - while self.sources: - source, classdef = self.sources.popitem() - s_value = self.bookkeeper.immutablevalue( - source.__dict__[self.name]) - if classdef: - s_value = s_value.bindcallables(classdef) - self.s_value = unionof(self.s_value, s_value) - return self.s_value - - def merge(self, other): - assert self.name == other.name - self.sources.update(other.sources) - self.s_value = unionof(self.s_value, other.s_value) - self.readonly = self.readonly and other.readonly - - -class ClassDef: - "Wraps a user class." 
- - def __init__(self, cls, bookkeeper): - self.bookkeeper = bookkeeper - self.attrs = {} # {name: Attribute} - self.revision = 0 # which increases the revision number - self.instantiation_locations = {} - self.cls = cls - self.subdefs = {} - assert (len(cls.__bases__) <= 1 or - cls.__bases__[1:] == (object,), # for baseobjspace.Wrappable - "single inheritance only right now: %r" % (cls,)) - if cls.__bases__: - base = cls.__bases__[0] - else: - base = object - self.basedef = bookkeeper.getclassdef(base) - if self.basedef: - self.basedef.subdefs[cls] = self - - # collect the (supposed constant) class attributes - for name, value in cls.__dict__.items(): - # ignore some special attributes - if name.startswith('_') and not isinstance(value, FunctionType): - continue - if isinstance(value, FunctionType): - value.class_ = cls # remember that this is really a method - self.add_source_for_attribute(name, cls, self) - - def add_source_for_attribute(self, attr, source, clsdef=None): - self.find_attribute(attr).sources[source] = clsdef - - def locate_attribute(self, attr): - for cdef in self.getmro(): - if attr in cdef.attrs: - return cdef - self.generalize_attr(attr) - return self - - def find_attribute(self, attr): - return self.locate_attribute(attr).attrs[attr] - - def __repr__(self): - return "<ClassDef '%s.%s'>" % (self.cls.__module__, self.cls.__name__) - - def commonbase(self, other): - while other is not None and not issubclass(self.cls, other.cls): - other = other.basedef - return other - - def superdef_containing(self, cls): - clsdef = self - while clsdef is not None and not issubclass(cls, clsdef.cls): - clsdef = clsdef.basedef - return clsdef - - def getmro(self): - while self is not None: - yield self - self = self.basedef - - def getallsubdefs(self): - pending = [self] - seen = {} - for clsdef in pending: - yield clsdef - for sub in clsdef.subdefs.values(): - if sub not in seen: - pending.append(sub) - seen[sub] = True - - def getallinstantiations(self): - 
locations = {} - for clsdef in self.getallsubdefs(): - locations.update(clsdef.instantiation_locations) - return locations - - def _generalize_attr(self, attr, s_value): - # first remove the attribute from subclasses -- including us! - subclass_attrs = [] - for subdef in self.getallsubdefs(): - if attr in subdef.attrs: - subclass_attrs.append(subdef.attrs[attr]) - del subdef.attrs[attr] - # bump the revision number of this class and all subclasses - subdef.revision += 1 - - # do the generalization - newattr = Attribute(attr, self.bookkeeper) - if s_value: - newattr.s_value = s_value - - for subattr in subclass_attrs: - newattr.merge(subattr) - self.attrs[attr] = newattr - - # reflow from all factories - for position in self.getallinstantiations(): - self.bookkeeper.annotator.reflowfromposition(position) - - def generalize_attr(self, attr, s_value=None): - # if the attribute exists in a superclass, generalize there. - for clsdef in self.getmro(): - if attr in clsdef.attrs: - clsdef._generalize_attr(attr, s_value) - else: - self._generalize_attr(attr, s_value) - - def about_attribute(self, name): - for cdef in self.getmro(): - if name in cdef.attrs: - s_result = cdef.attrs[name].s_value - if s_result != SomeImpossibleValue(): - return s_result - else: - return None - return None - - -def isclassdef(x): - return isinstance(x, ClassDef) diff --git a/pypy/annotation/factory.py b/pypy/annotation/factory.py deleted file mode 100644 index 124083ed2e..0000000000 --- a/pypy/annotation/factory.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Mutable Objects Factories. - -A factory is associated to an SpaceOperation in the source that creates a -built-in mutable object: currently 'newlist' and 'newdict'. -The factory remembers how general an object it has to create here. 
-""" - -from pypy.annotation.model import SomeList, SomeDict -from pypy.annotation.model import SomeImpossibleValue, unionof -from pypy.annotation.bookkeeper import getbookkeeper - - -class BlockedInference(Exception): - """This exception signals the type inference engine that the situation - is currently blocked, and that it should try to progress elsewhere.""" - - def __init__(self): - try: - self.break_at = getbookkeeper().position_key - except AttributeError: - self.break_at = None - - def __repr__(self): - return "<BlockedInference break_at %r>" %(self.break_at,) - __str__ = __repr__ - - -class ListFactory: - s_item = SomeImpossibleValue() - - def __repr__(self): - return '%s(s_item=%r)' % (self.__class__.__name__, self.s_item) - - def create(self): - return SomeList(factories = {self: True}, s_item = self.s_item) - - def generalize(self, s_new_item): - if not self.s_item.contains(s_new_item): - self.s_item = unionof(self.s_item, s_new_item) - return True - else: - return False - - -class DictFactory: - s_key = SomeImpossibleValue() - s_value = SomeImpossibleValue() - - def __repr__(self): - return '%s(s_key=%r, s_value=%r)' % (self.__class__.__name__, - self.s_key, self.s_value) - - def create(self): - return SomeDict(factories = {self: True}, - s_key = self.s_key, - s_value = self.s_value) - - def generalize(self, s_new_key, s_new_value): - if (self.s_key.contains(s_new_key) and - self.s_value.contains(s_new_value)): - return False - self.s_key = unionof(self.s_key, s_new_key) - self.s_value = unionof(self.s_value, s_new_value) - return True - - -def generalize(factories, *args): - """Signals all the factories in the given set to generalize themselves. - The args must match the signature of the generalize() method of the - particular factories (which should all be of the same class). 
- """ - modified = [factory for factory in factories if factory.generalize(*args)] - if modified: - for factory in modified: - factory.bookkeeper.annotator.reflowfromposition(factory.position_key) - raise BlockedInference # reflow now diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py deleted file mode 100644 index 0ac97a3df0..0000000000 --- a/pypy/annotation/model.py +++ /dev/null @@ -1,277 +0,0 @@ -""" -This file defines the 'subset' SomeValue classes. - -An instance of a SomeValue class stands for a Python object that has some -known properties, for example that is known to be a list of non-negative -integers. Each instance can be considered as an object that is only -'partially defined'. Another point of view is that each instance is a -generic element in some specific subset of the set of all objects. - -""" - -# Old terminology still in use here and there: -# SomeValue means one of the SomeXxx classes in this file. -# Cell is an instance of one of these classes. -# -# Think about cells as potato-shaped circles in a diagram: -# ______________________________________________________ -# / SomeObject() \ -# / ___________________________ ______________ \ -# | / SomeInteger(nonneg=False) \____ / SomeString() \ \ -# | / __________________________ \ | | | -# | | / SomeInteger(nonneg=True) \ | | "hello" | | -# | | | 0 42 _________/ | \______________/ | -# | \ -3 \________________/ / | -# \ \ -5 _____/ / -# \ \________________________/ 3.1416 / -# \_____________________________________________________/ -# - - -from types import ClassType, BuiltinFunctionType, FunctionType, MethodType -from types import InstanceType -import pypy -from pypy.annotation.pairtype import pair, extendabletype -from pypy.objspace.flow.model import Constant -from pypy.tool.cache import Cache -import inspect - - -DEBUG = True # set to False to disable recording of debugging information - - -class SomeObject: - """The set of all objects. 
Each instance stands - for an arbitrary object about which nothing is known.""" - __metaclass__ = extendabletype - knowntype = object - def __eq__(self, other): - return (self.__class__ is other.__class__ and - self.__dict__ == other.__dict__) - def __ne__(self, other): - return not (self == other) - def __repr__(self): - items = self.__dict__.items() - items.sort() - args = [] - for k, v in items: - m = getattr(self, 'fmt_' + k, repr) - r = m(v) - if r is not None: - args.append('%s=%s'%(k, r)) - kwds = ', '.join(args) - return '%s(%s)' % (self.__class__.__name__, kwds) - - def fmt_knowntype(self, t): - return t.__name__ - - def contains(self, other): - return self == other or pair(self, other).union() == self - def is_constant(self): - return hasattr(self, 'const') - - # for debugging, record where each instance comes from - # this is disabled if DEBUG is set to False - _coming_from = {} - def __new__(cls, *args, **kw): - self = super(SomeObject, cls).__new__(cls, *args, **kw) - if DEBUG: - try: - bookkeeper = pypy.annotation.bookkeeper.getbookkeeper() - position_key = bookkeeper.position_key - except AttributeError: - pass - else: - SomeObject._coming_from[id(self)] = position_key, None - return self - def origin(self): - return SomeObject._coming_from.get(id(self), (None, None))[0] - origin = property(origin) - def caused_by_merge(self): - return SomeObject._coming_from.get(id(self), (None, None))[1] - def set_caused_by_merge(self, nvalue): - SomeObject._coming_from[id(self)] = self.origin, nvalue - caused_by_merge = property(caused_by_merge, set_caused_by_merge) - del set_caused_by_merge - - -class SomeInteger(SomeObject): - "Stands for an object which is known to be an integer." - knowntype = int - def __init__(self, nonneg=False, unsigned=False): - self.nonneg = nonneg - self.unsigned = unsigned # pypy.objspace.std.restricted_int.r_uint - - -class SomeBool(SomeInteger): - "Stands for true or false." 
- knowntype = bool - nonneg = True - unsigned = False - def __init__(self): - pass - - -class SomeString(SomeObject): - "Stands for an object which is known to be a string." - knowntype = str - - -class SomeChar(SomeString): - "Stands for an object known to be a string of length 1." - - -class SomeList(SomeObject): - "Stands for a homogenous list of any length." - knowntype = list - def __init__(self, factories, s_item=SomeObject()): - self.factories = factories - self.s_item = s_item # general enough for any element - - -class SomeTuple(SomeObject): - "Stands for a tuple of known length." - knowntype = tuple - def __init__(self, items): - self.items = tuple(items) # tuple of s_xxx elements - for i in items: - if not i.is_constant(): - break - else: - self.const = tuple([i.const for i in items]) - - -class SomeDict(SomeObject): - "Stands for a dict." - knowntype = dict - def __init__(self, factories, s_key, s_value): - self.factories = factories - self.s_key = s_key - self.s_value = s_value - - -class SomeIterator(SomeObject): - "Stands for an iterator returning objects of a known type." - knowntype = type(iter([])) # arbitrarily chose seqiter as the type - def __init__(self, s_item=SomeObject()): - self.s_item = s_item - - -class SomeInstance(SomeObject): - "Stands for an instance of a (user-defined) class." - def __init__(self, classdef): - self.classdef = classdef - self.knowntype = classdef.cls - self.revision = classdef.revision - def fmt_knowntype(self, kt): - return None - def fmt_classdef(self, cd): - return cd.cls.__name__ - -def new_or_old_class(c): - if hasattr(c, '__class__'): - return c.__class__ - else: - return type(c) - -class SomePBC(SomeObject): - """Stands for a global user instance, built prior to the analysis, - or a set of such instances.""" - def __init__(self, prebuiltinstances): - # prebuiltinstances is a dictionary containing concrete python - # objects as keys. 
- # if the key is a function, the value can be a classdef to - # indicate that it is really a method. - prebuiltinstances = prebuiltinstances.copy() - self.prebuiltinstances = prebuiltinstances - self.simplify() - self.knowntype = reduce(commonbase, - [new_or_old_class(x) for x in prebuiltinstances]) - if prebuiltinstances.values() == [True]: - # hack for the convenience of direct callers to SomePBC(): - # only if there is a single object in prebuiltinstances and - # it doesn't have an associated ClassDef - self.const, = prebuiltinstances - def simplify(self): - # We check that the dictionary does not contain at the same time - # a function bound to a classdef, and constant bound method objects - # on that class. - for x, ignored in self.prebuiltinstances.items(): - if isinstance(x, MethodType) and x.im_func in self.prebuiltinstances: - classdef = self.prebuiltinstances[x.im_func] - if isinstance(x.im_self, classdef.cls): - del self.prebuiltinstances[x] - - def fmt_prebuiltinstances(self, pbis): - if hasattr(self, 'const'): - return None - else: - return '{...%s...}'%(len(pbis),) - - def fmt_knowntype(self, kt): - if self.is_constant(): - return None - else: - return kt.__name__ - -class SomeBuiltin(SomeObject): - "Stands for a built-in function or method with special-cased analysis." - knowntype = BuiltinFunctionType # == BuiltinMethodType - def __init__(self, analyser, s_self=None): - self.analyser = analyser - self.s_self = s_self - - -class SomeImpossibleValue(SomeObject): - """The empty set. Instances are placeholders for objects that - will never show up at run-time, e.g. elements of an empty list.""" - - -def unionof(*somevalues): - "The most precise SomeValue instance that contains all the values." 
- s1 = SomeImpossibleValue() - for s2 in somevalues: - if s1 != s2: - s1 = pair(s1, s2).union() - if DEBUG and s1.caused_by_merge is None and len(somevalues) > 1: - s1.caused_by_merge = somevalues - return s1 - -# ____________________________________________________________ -# internal - -def setunion(d1, d2): - "Union of two sets represented as dictionaries." - d = d1.copy() - d.update(d2) - return d - -def set(it): - "Turn an iterable into a set." - d = {} - for x in it: - d[x] = True - return d - -def commonbase(cls1, cls2): # XXX single inheritance only XXX hum - l1 = inspect.getmro(cls1) - l2 = inspect.getmro(cls2) - if l1[-1] != object: - l1 = l1 + (object,) - if l2[-1] != object: - l2 = l2 + (object,) - for x in l1: - if x in l2: - return x - assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2) - -def missing_operation(cls, name): - def default_op(*args): - #print '* warning, no type available for %s(%s)' % ( - # name, ', '.join([repr(a) for a in args])) - return SomeObject() - setattr(cls, name, default_op) - -# this has the side-effect of registering the unary and binary operations -from pypy.annotation.unaryop import UNARY_OPERATIONS -from pypy.annotation.binaryop import BINARY_OPERATIONS diff --git a/pypy/annotation/pairtype.py b/pypy/annotation/pairtype.py deleted file mode 100644 index 4ab2288366..0000000000 --- a/pypy/annotation/pairtype.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -XXX A docstring should go here. 
-""" - -class extendabletype(type): - """A type with a syntax trick: 'class __extend__(t)' actually extends - the definition of 't' instead of creating a new subclass.""" - def __new__(cls, name, bases, dict): - if name == '__extend__': - cls = bases[0] # override data into the existing base - for key, value in dict.items(): - setattr(cls, key, value) - return None - else: - return super(extendabletype, cls).__new__(cls, name, bases, dict) - - -def pair(a, b): - """Return a pair object.""" - tp = pairtype(a.__class__, b.__class__) - return tp((a, b)) # tp is a subclass of tuple - -pairtypecache = {} - -def pairtype(cls1, cls2): - """type(pair(a,b)) is pairtype(a.__class__, b.__class__).""" - try: - pair = pairtypecache[cls1, cls2] - except KeyError: - name = 'pairtype(%s, %s)' % (cls1.__name__, cls2.__name__) - bases1 = [pairtype(base1, cls2) for base1 in cls1.__bases__] - bases2 = [pairtype(cls1, base2) for base2 in cls2.__bases__] - bases = tuple(bases1 + bases2) or (tuple,) # 'tuple': ultimate base - pair = pairtypecache[cls1, cls2] = extendabletype(name, bases, {}) - return pair diff --git a/pypy/annotation/test/__init__.py b/pypy/annotation/test/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/annotation/test/__init__.py +++ /dev/null diff --git a/pypy/annotation/test/autopath.py b/pypy/annotation/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/annotation/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. 
- -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/annotation/test/test_model.py b/pypy/annotation/test/test_model.py deleted file mode 100644 index 1649f24d35..0000000000 --- a/pypy/annotation/test/test_model.py +++ /dev/null @@ -1,63 +0,0 @@ - -import autopath -from pypy.annotation.model import * - - -s1 = SomeObject() -s2 = SomeInteger(nonneg=True) -s3 = SomeInteger(nonneg=False) -s4 = SomeList({}, SomeTuple([SomeInteger(nonneg=True), SomeString()])) -s5 = SomeList({}, SomeTuple([SomeInteger(nonneg=False), SomeString()])) -s6 = SomeImpossibleValue() -slist = [s1,s2,s3,s4,s5,s6] - -def test_equality(): - assert s1 != s2 != s3 != s4 != s5 != s6 - assert s1 == SomeObject() - assert s2 == SomeInteger(nonneg=True) - assert s3 == SomeInteger(nonneg=False) - assert s4 == SomeList({}, SomeTuple([SomeInteger(nonneg=True), SomeString()])) - assert s5 == SomeList({}, SomeTuple([SomeInteger(nonneg=False), SomeString()])) - assert s6 == SomeImpossibleValue() - -def test_contains(): - assert ([(s,t) for s in slist 
for t in slist if s.contains(t)] == - [(s1,s1), (s1,s2), (s1,s3), (s1,s4), (s1,s5), (s1,s6), - (s2,s2), (s2,s6), - (s3,s2), (s3,s3), (s3,s6), - (s4,s4), (s4,s6), - (s5,s4), (s5,s5), (s5,s6), - (s6,s6)]) - -def test_union(): - assert ([unionof(s,t) for s in slist for t in slist] == - [s1, s1, s1, s1, s1, s1, - s1, s2, s3, s1, s1, s2, - s1, s3, s3, s1, s1, s3, - s1, s1, s1, s4, s5, s4, - s1, s1, s1, s5, s5, s5, - s1, s2, s3, s4, s5, s6]) - -def test_commonbase_simple(): - class A0: - pass - class A1(A0): - pass - class A2(A0): - pass - class B1(object): - pass - class B2(object): - pass - class B3(object, A0): - pass - assert commonbase(A1,A2) is A0 - assert commonbase(A1,A0) is A0 - assert commonbase(A1,A1) is A1 - assert commonbase(A2,B2) is object - assert commonbase(A2,B3) is A0 - -if __name__ == '__main__': - for name, value in globals().items(): - if name.startswith('test_'): - value() diff --git a/pypy/annotation/test/test_pairtype.py b/pypy/annotation/test/test_pairtype.py deleted file mode 100644 index 6385c9c8db..0000000000 --- a/pypy/annotation/test/test_pairtype.py +++ /dev/null @@ -1,93 +0,0 @@ - -from pypy.annotation.pairtype import pairtype, pair - -def test_binop(): - ### Binary operation example - class __extend__(pairtype(int, int)): - def add((x, y)): - return 'integer: %s+%s' % (x, y) - def sub((x, y)): - return 'integer: %s-%s' % (x, y) - - class __extend__(pairtype(bool, bool)): - def add((x, y)): - return 'bool: %s+%s' % (x, y) - - assert pair(3,4).add() == 'integer: 3+4' - assert pair(3,4).sub() == 'integer: 3-4' - assert pair(3,True).add() == 'integer: 3+True' - assert pair(3,True).sub() == 'integer: 3-True' - assert pair(False,4).add() == 'integer: False+4' - assert pair(False,4).sub() == 'integer: False-4' - assert pair(False,True).add() == 'bool: False+True' - assert pair(False,True).sub() == 'integer: False-True' - -def test_somebuiltin(): - ### Operation on built-in types - class MiniPickler: - def __init__(self): - self.data = [] - def 
emit(self, datum): - self.data.append(datum) - - class __extend__(pairtype(MiniPickler, int)): - def write((pickler, x)): - pickler.emit('I%d' % x) - - class __extend__(pairtype(MiniPickler, str)): - def write((pickler, x)): - pickler.emit('S%s' % x) - - class __extend__(pairtype(MiniPickler, list)): - def write((pickler, x)): - for item in x: - pair(pickler, item).write() - pickler.emit('L%d' % len(x)) - - p = MiniPickler() - pair(p, [1, 2, ['hello', 3]]).write() - assert p.data == ['I1', 'I2', 'Shello', 'I3', 'L2', 'L3'] - -def test_some_multimethod(): - ### Another multimethod example - class Block: - def __init__(self, exit): - self.exit = exit - class Jump: - pass - class Switch: - pass - - class C_Generator: - def __init__(self): - self.lines = [] - - class __extend__(pairtype(C_Generator, Block)): - def emit((gen, block), inputvars): - gen.lines.append("C code for block") - outputvars = inputvars + ['v4', 'v5'] - pair(gen, block.exit).emit(outputvars) - - class __extend__(pairtype(C_Generator, Jump)): - def emit((gen, jump), inputvars): - gen.lines.append("goto xyz") - - class __extend__(pairtype(C_Generator, Switch)): - def emit((gen, jump), inputvars): - gen.lines.append("switch (%s) { ... }" % inputvars[-1]) - - g = C_Generator() - pair(g, Block(Switch())).emit(['v1', 'v2']) - assert g.lines == ["C code for block", "switch (v5) { ... }"] - - class Lisp_Generator: - def __init__(self): - self.progn = [] - - class __extend__(pairtype(Lisp_Generator, Block)): - def emit((gen, block), inputvars): - gen.progn.append("(do 'something)") - - g = Lisp_Generator() - pair(g, Block(Switch())).emit(['v1', 'v2']) - assert g.progn == ["(do 'something)"] diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py deleted file mode 100644 index 49c3592406..0000000000 --- a/pypy/annotation/unaryop.py +++ /dev/null @@ -1,258 +0,0 @@ -""" -Unary operations on SomeValues. 
-""" - -from types import FunctionType -from pypy.interpreter.argument import Arguments -from pypy.annotation.pairtype import pair -from pypy.annotation.model import SomeObject, SomeInteger, SomeBool -from pypy.annotation.model import SomeString, SomeChar, SomeList, SomeDict -from pypy.annotation.model import SomeTuple, SomeImpossibleValue -from pypy.annotation.model import SomeInstance, SomeBuiltin -from pypy.annotation.model import SomeIterator, SomePBC, new_or_old_class -from pypy.annotation.model import unionof, set, setunion, missing_operation -from pypy.annotation.factory import BlockedInference -from pypy.annotation.bookkeeper import getbookkeeper -from pypy.annotation.classdef import isclassdef - -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - -UNARY_OPERATIONS = set(['len', 'is_true', 'getattr', 'setattr', - 'simple_call', 'call_args', - 'iter', 'next']) - -for opname in UNARY_OPERATIONS: - missing_operation(SomeObject, opname) - - -class __extend__(SomeObject): - - def len(obj): - return SomeInteger(nonneg=True) - - def is_true(obj): - if obj.is_constant(): - return immutablevalue(bool(obj.const)) - else: - s_len = obj.len() - if s_len.is_constant(): - return immutablevalue(s_len.const > 0) - else: - return SomeBool() - - def find_method(obj, name): - "Look for a special-case implementation for the named method." 
- analyser = getattr(obj.__class__, 'method_' + name) - return SomeBuiltin(analyser, obj) - - def getattr(obj, s_attr): - # get a SomeBuiltin if the SomeObject has - # a corresponding method to handle it - if s_attr.is_constant() and isinstance(s_attr.const, str): - attr = s_attr.const - try: - return obj.find_method(attr) - except AttributeError: - pass - # if the SomeObject is itself a constant, allow reading its attrs - if obj.is_constant() and hasattr(obj.const, attr): - return immutablevalue(getattr(obj.const, attr)) - return SomeObject() - - def bindcallables(obj, classdef): - return obj # default unbound __get__ implementation - - def simple_call(obj, *args_s): - space = RPythonCallsSpace() - return obj.call(Arguments(space, args_s)) - - def call_args(obj, s_shape, *args_s): - space = RPythonCallsSpace() - return obj.call(Arguments.fromshape(space, s_shape.const, args_s)) - - def call(obj, args): - #raise Exception, "cannot follow call_args%r" % (obj_and_args,) - print "*** cannot follow call(%r, %r)" % (obj, args) - return SomeObject() - - -class __extend__(SomeBool): - def is_true(self): - return self - -class __extend__(SomeTuple): - - def len(tup): - return immutablevalue(len(tup.items)) - - def iter(tup): - return SomeIterator(unionof(*tup.items)) - - -class __extend__(SomeList): - - def method_append(lst, s_value): - pair(lst, SomeInteger()).setitem(s_value) - - def method_reverse(lst): - pass - - def method_insert(lst, s_index, s_value): - pair(lst, SomeInteger()).setitem(s_value) - - def method_pop(lst, s_index=None): - return lst.s_item - - def iter(lst): - return SomeIterator(lst.s_item) - - -class __extend__(SomeString): - - def method_join(str, s_list): - return SomeString() - - def iter(str): - return SomeIterator(SomeChar()) - - -class __extend__(SomeChar): - - def len(chr): - return immutablevalue(1) - - -class __extend__(SomeIterator): - - def next(itr): - return itr.s_item - - -class __extend__(SomeInstance): - - def currentdef(ins): - if 
ins.revision != ins.classdef.revision: - #print ins.revision, ins.classdef.revision - raise BlockedInference - return ins.classdef - - def getattr(ins, s_attr): - if s_attr.is_constant() and isinstance(s_attr.const, str): - attr = s_attr.const - #print 'getattr:', ins, attr, ins.classdef.revision - s_result = ins.currentdef().find_attribute(attr).getvalue() - # we call this because it might raise BlockedInference if - # the above line caused generalization. - ins.currentdef() - return s_result - return SomeObject() - - def setattr(ins, s_attr, s_value): - if s_attr.is_constant() and isinstance(s_attr.const, str): - attr = s_attr.const - clsdef = ins.currentdef().locate_attribute(attr) - attrdef = clsdef.attrs[attr] - attrdef.readonly = False - - # if the attrdef is new, this must fail - if attrdef.getvalue().contains(s_value): - return - # create or update the attribute in clsdef - clsdef.generalize_attr(attr, s_value) - raise BlockedInference - return - - def contains(self, other): - # override the default contains() to ignore revision numbers - if self == other: - return True - s_union = pair(self, other).union() - return (isinstance(s_union, SomeInstance) and - s_union.classdef is self.classdef) - - -class __extend__(SomeBuiltin): - def simple_call(bltn, *args): - if bltn.s_self is not None: - return bltn.analyser(bltn.s_self, *args) - else: - return bltn.analyser(*args) - - -class __extend__(SomePBC): - - def getattr(pbc, s_attr): - bookkeeper = getbookkeeper() - assert s_attr.is_constant() - attr = s_attr.const - actuals = [] - for c in pbc.prebuiltinstances: - if hasattr(c, attr): - # force the attribute to be considered on the class - classdef = bookkeeper.getclassdef(new_or_old_class(c)) - classdef.find_attribute(attr).getvalue() - # but only return the more precise result getattr(c, attr) - actuals.append(immutablevalue(getattr(c, attr))) - return unionof(*actuals) - - def setattr(pbc, s_attr, s_value): - #raise Exception, "oops!" 
- print "*** WARNING: setattr not wanted on %r" % pbc - pass - - def call(pbc, args): - bookkeeper = getbookkeeper() - results = [] - for func, classdef in pbc.prebuiltinstances.items(): - if isclassdef(classdef): - # create s_self and record the creation in the factory - s_self = SomeInstance(classdef) - classdef.instantiation_locations[ - bookkeeper.position_key] = True - args1 = args.prepend(s_self) - else: - args1 = args - results.append(bookkeeper.pycall(func, args1)) - return unionof(*results) - - def bindcallables(pbc, classdef): - """ turn the callables in the given SomeCallable 'cal' - into bound versions. - """ - d = {} - for func, value in pbc.prebuiltinstances.items(): - if isinstance(func, FunctionType): - if isclassdef(value): - print ("!!! rebinding an already bound" - " method %r with %r" % (func, value)) - d[func] = classdef - elif isinstance(func, staticmethod): - d[func.__get__(43)] = value - else: - d[func] = value - return SomePBC(d) - - -class RPythonCallsSpace: - """Pseudo Object Space providing almost no real operation. - For the Arguments class: if it really needs other operations, it means - that the call pattern is too complex for R-Python. 
- """ - def newtuple(self, items_s): - return SomeTuple(items_s) - - def newdict(self, stuff): - raise CallPatternTooComplex, "'**' argument" - - def unpackiterable(self, s_obj, expected_length=None): - if isinstance(s_obj, SomeTuple): - if (expected_length is not None and - expected_length != len(s_obj.items)): - raise ValueError - return s_obj.items - raise CallPatternTooComplex, "'*' argument must be SomeTuple" - -class CallPatternTooComplex(Exception): - pass diff --git a/pypy/appspace/__init__.py b/pypy/appspace/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/pypy/appspace/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/pypy/appspace/_file.py b/pypy/appspace/_file.py deleted file mode 100644 index 4a3a8db501..0000000000 --- a/pypy/appspace/_file.py +++ /dev/null @@ -1,75 +0,0 @@ -import sio - -class file_(object): - """An implementation of file objects in Python. it relies on Guido's - sio.py implementation. - """ - def __init__(self, name, mode='r', bufsize=None): - self.reading = False - self.writing = False - - if not mode: - raise IOError('invalid mode : ') - if mode[0] not in ['r', 'w', 'a', 'U']: - raise IOError('invalid mode : %s' % mode) - else: - if mode[0] in ['r', 'U']: - self.reading = True - else: - self.writing = True - try: - if mode[1] == 'b': - plus = mode[2] - else: - plus = mode[1] - if plus == '+': - self.reading = self.writing = True - except IndexError: - pass - - self._mode = mode - self._name = name - self._closed = False - self.softspace = 0 # Required according to file object docs - self._encoding = None # Fix when we find out how encoding should be - self.fd = sio.DiskFile(name, mode) - if mode in ['U', 'rU']: - # Wants universal newlines - self.fd = sio.TextInputFilter(self.fd) - if bufsize < 0: - bufsize = None - if not self.writing and (bufsize is None or bufsize > 0): - "Read only buffered stream." 
- self.fd = sio.BufferingInputStream(self.fd, bufsize) - if not self.reading: - if bufsize is None or bufsize > 1: - "Write only buffered stream." - self.fd = sio.BufferingOutputStream(self.fd, bufsize) - elif bufsize == 1: - self.fd = sio.LineBufferingOutputStream(self.fd) - if self.reading and self.writing: - if bufsize > 2: - "Read and write buffered stream." - self.fd = sio.BufferingInputOutputStream(self.fd, bufsize) - return self.fd - - def __getattr__(self, attr): - """ - Handle the readonly attributes and then delegate the other - methods to the underlying file object, setting the 'closed' - attribute if you close the file. - """ - if attr in ['fd', 'softspace', 'reading', 'writing']: - return self.__dict__[attr] - elif attr in ['mode', 'name', 'closed', 'encoding']: - return self.__dict__['_' + attr] - elif attr == 'close': - self._closed = True - - return getattr(self.fd, attr) - - def __setattr__(self, attr, val): - "Make some attributes readonly." - if attr in ['mode', 'name', 'closed', 'encoding']: - raise TypeError, "readonly attribute:'%s'" % attr - self.__dict__[attr] = val diff --git a/pypy/appspace/_float_formatting.py b/pypy/appspace/_float_formatting.py deleted file mode 100644 index 171ae61dea..0000000000 --- a/pypy/appspace/_float_formatting.py +++ /dev/null @@ -1,110 +0,0 @@ -import math - -# from the excessive effort department, routines for printing floating -# point numbers from - -# "Printing Floating-Point Numbers Quickly and Accurately" by Burger & -# Dybvig, Proceedings of the SIGPLAN '96 Conference on Programming -# Language Design and Implementation. - -# The paper contains scheme code which has been specialized for IEEE -# doubles and converted into Python by Michael Hudson. - -# XXX unfortunately, we need the fixed-format output routines, the source -# for which is not included in the paper... for now, just put up with -# occasionally incorrectly rounded final digits. I'll get to it. 
- -# XXX should run this at interpreter level, really.... - -def decode_float(f): - """decode_float(float) -> int, int - - Return (m, e), the mantissa and exponent respectively of - f (assuming f is an IEEE double), i.e. f == m * 2**e and - 2**52 <= m < 2**53.""" - m, e = math.frexp(f) - m = long(m*2.0**53) - e -= 53 - return m, e - -def decompose(f): - """decompose(float) -> int, int, int, int - - Return r, s, m_plus, m_minus for f, in the terms of - Burger and Dybvig's paper (see Table 1). - - To spell this out: f = r/s, (r+m+)/s is halfway to the - next largest floating point number, (r-m-) halfway to - the next smallest.""" - m, e = decode_float(f) - if e >= 0: - if not m != 2**52: - be = 2**e - return m*be*2, 2, be, be - else: - be = 2**e - be1 = 2*be - return m*be1*2, 4, be1, be - else: - if e == -1075 or m != 2**52: - return m*2, 2**(-e+1), 1, 1 - else: - return m*4, 2**(-e+2), 2, 1 - - -def flonum2digits(f): - """flonum2digits(float) -> [int], int - - Given a float f return [d1, ..., dn], k such that - - 0.[d1][d2]...[dn] * 10**k - - is the shortest decimal representation that will - reproduce f when read in by a correctly rounding input - routine (under any strategy for breaking ties).""" - - assert f >= 0 - if f == 0.0: - return ['0'], 1 - - # See decompose's docstring for what these mean. - r, s, m_plus, m_minus = decompose(f) - - # k is the index, relative to the radix point of the - # most-significant non-zero digit of the infinite - # decimal expansion of f. This calculation has the - # potential to underestimate by one (handled below). - k = long(math.ceil(math.log10(f) - 1e-10)) - - if k >= 0: - s *= 10 ** k - else: - scale = 10 ** -k - r *= scale - m_plus *= scale - m_minus *= scale - - # Check that we got k right above. - if r + m_plus > s: - s *= 10 - k += 1 - - # Generate the digits. 
- rr = [] - while 1: - d, r = divmod(r*10, s) - m_plus *= 10 - m_minus *= 10 - tc1 = r < m_minus - tc2 = (r + m_plus) > s - if tc2: - rr.append(d+1) - else: - rr.append(d) - if tc1 or tc2: - break - - assert max(rr) < 10 - assert min(rr) >= 0 - - return map(str, rr), k diff --git a/pypy/appspace/_formatting.py b/pypy/appspace/_formatting.py deleted file mode 100644 index e88ea067a7..0000000000 --- a/pypy/appspace/_formatting.py +++ /dev/null @@ -1,401 +0,0 @@ -# Application level implementation of string formatting. - -# There's some insane stuff in here. Blame CPython. Please. - -# Known problems: -# (1) rounding isn't always right (see comments in _float_formatting). -# (2) something goes wrong in the f_alt case of %g handling. -# (3) it's really, really slow. - -class _Flags(object): - def __repr__(self): - return "<%s>"%(', '.join([f for f in self.__dict__ - if f[0] == 'f' and getattr(self, f)]),) - f_ljust = 0 - f_sign = 0 - f_blank = 0 - f_alt = 0 - f_zero = 0 - - -def value_next(valueiter): - try: - return valueiter.next() - except StopIteration: - raise TypeError('not enough arguments for format string') - - -def peel_num(c, fmtiter, valueiter): - if c == '*': - v = value_next(valueiter) - if not isinstance(v, int): - raise TypeError, "* wants int" - return fmtiter.next(), v - n = '' - while c in '0123456789': - n += c - c = fmtiter.next() - if n: - return c, int(n) - else: - return c, 0 - - -def peel_flags(c, fmtiter): - flags = _Flags() - while 1: - if c == '-': - flags.f_ljust = True - elif c == '+': - flags.f_sign = True - elif c == ' ': - flags.f_blank = True - elif c == '#': - flags.f_alt = True - elif c == '0': - flags.f_zero = True - else: - break - c = fmtiter.next() - return c, flags - - -def parse_fmt(fmtiter, valueiter, valuedict): - """return (char, flags, width, prec, value) - partially consumes fmtiter & valueiter""" - c = fmtiter.next() - gotvalue = False - if c == '(': - n = '' - pcount = 1 - while 1: - c = fmtiter.next() - if c == ')': - 
pcount -= 1 - if pcount == 0: - break - elif c == '(': - pcount += 1 - n += c - value = valuedict[n] - gotvalue = True - c = fmtiter.next() - c, flags = peel_flags(c, fmtiter) - c, width = peel_num(c, fmtiter, valueiter) - if c == '.': - c, prec = peel_num(fmtiter.next(), fmtiter, valueiter) - else: - prec = None - if c in 'hlL': - c = fmtiter.next() - if width and width < 0: - # this can happen with *-args - flags.f_ljust = True - width = -width - if not gotvalue: - if c == '%': - # did YOU realize that "%4%"%() == ' %'?? - value = '%' - c = 's' - else: - value = value_next(valueiter) - return (c, flags, width, prec, value) - - -class Formatter(object): - def __init__(self, char, flags, width, prec, value): - self.char = char - self.flags = flags - self.width = width - self.prec = prec - self.value = value - - def numeric_preprocess(self, v): - # negative zeroes? - # * mwh giggles, falls over - # still, if we can recognize them, here's the place to do it. - import math - if v < 0 or v == 0 and math.atan2(0, v) != 0: - sign = '-' - v = -v - else: - if self.flags.f_sign: - sign = '+' - elif self.flags.f_blank: - sign = ' ' - else: - sign = '' - return v, sign - - def numeric_postprocess(self, r, sign): - assert self.char in 'iduoxXeEfFgG' - padchar = ' ' - if self.flags.f_zero: - padchar = '0' - if self.width is not None: - p = self.width - len(r) - len(sign) - if self.flags.f_ljust: - r = sign + r + ' '*p - else: - if self.flags.f_zero: - r = sign+padchar*p + r - else: - r = padchar*p + sign + r - else: - r = sign + r - return r - - def format(self): - raise NotImplementedError - - def std_wp(self, r): - assert self.char not in 'iduoxXeEfFgG' - if self.prec is not None: - r = r[:self.prec] - if self.width is not None: - p = self.width - len(r) - if self.flags.f_ljust: - r = r + ' '*p - else: - r = ' '*p + r - return r - - -def funcFormatter(*funcs): - class _F(Formatter): - def format(self): - r = self.value - for f in funcs: - r = f(r) - return self.std_wp(r) - 
return _F - - -def maybe_int(value): - try: - inter = value.__int__ - except AttributeError: - raise TypeError, "an integer argument is required" - return inter() - - -def maybe_float(value): - try: - floater = value.__float__ - except AttributeError: - raise TypeError, "a float argument is required" - return floater() - - -from _float_formatting import flonum2digits - -class FloatFormatter(Formatter): - def eDigits(self, ds): - ds = ds[:self.prec + 1] + ['0'] * (self.prec + 1 - len(ds)) - if self.prec > 0 or self.flags.f_alt: - ds[1:1] = ['.'] - return ''.join(ds) - - def fDigits(self, ds, k): - p = max(self.prec, 0) - if 0 < k < len(ds): - if len(ds) - k < p: - ds.extend(['0'] * (p - (len(ds) - k))) - else: - ds = ds[:p + k] - ds[k:k] = ['.'] - elif k <= 0: - ds[0:0] = ['0']*(-k) - ds = ds[:p] - ds.extend(['0'] * (p - len(ds))) - ds[0:0]= ['0', '.'] - elif k >= len(ds): - ds.extend((k-len(ds))*['0'] + ['.'] + ['0']*p) - return ''.join(ds) - - def format(self): - v = maybe_float(self.value) - v, sign = self.numeric_preprocess(v) - if self.prec is None: - self.prec = 6 - r = self._format(v) - return self.numeric_postprocess(r, sign) - - -class FloatFFormatter(FloatFormatter): - def _format(self, v): - if v/1e25 > 1e25: - return FloatGFormatter('g', self.flags, self.width, - self.prec, self.value).format() - ds, k = flonum2digits(v) - digits = self.fDigits(ds, k) - if not self.flags.f_alt: - digits = digits.rstrip('.') - return digits - - -class FloatEFormatter(FloatFormatter): - def _format(self, v): - ds, k = flonum2digits(v) - digits = self.eDigits(ds) - return "%s%c%+03d"%(digits, self.char, k-1) - - -class FloatGFormatter(FloatFormatter): - # The description of %g in the Python documentation lies - # in a variety of minor ways. - # Gah, this still isn't quite right in the f_alt case. - # (One has to wonder who might care). - def _format(self, v): - ds, k = flonum2digits(v) - ds = ds[:self.prec] # XXX rounding! 
- if -4 < k <= self.prec: - digits = self.fDigits(ds, k) - if not self.flags.f_alt: - digits = digits.rstrip('0').rstrip('.') - r = digits - else: - digits = self.eDigits(ds) - if not self.flags.f_alt: - digits = digits.rstrip('0').rstrip('.') - r = "%se%+03d"%(digits, k-1) - return r - - -class HexFormatter(Formatter): - # NB: this has 2.4 semantics wrt. negative values - def format(self): - v, sign = self.numeric_preprocess(maybe_int(self.value)) - r = hex(v)[2:] - if self.prec is not None and len(r) < self.prec: - r = '0'*(self.prec - len(r)) + r - if self.flags.f_alt: - r = '0x' + r - if self.char == 'X': - r = r.upper() - return self.numeric_postprocess(r, sign) - - -class OctFormatter(Formatter): - # NB: this has 2.4 semantics wrt. negative values - def format(self): - v, sign = self.numeric_preprocess(maybe_int(self.value)) - r = oct(v) - if not self.flags.f_alt: - r = r[1:] - if self.prec is not None and len(r) < self.prec: - r = '0'*(self.prec - len(r)) + r - return self.numeric_postprocess(r, sign) - - -class IntFormatter(Formatter): - # NB: this has 2.4 semantics wrt. 
negative values (for %u) - def format(self): - v, sign = self.numeric_preprocess(maybe_int(self.value)) - r = str(v) - if self.prec is not None and len(r) < self.prec: - r = '0'*(self.prec - len(r)) + r - return self.numeric_postprocess(r, sign) - - -class CharFormatter(Formatter): - def format(self): - if isinstance(self.value, str): - v = self.value - if len(v) != 1: - raise TypeError, "%c requires int or char" - else: - i = maybe_int(self.value) - if not 0 <= i <= 255: - raise OverflowError("OverflowError: unsigned byte " - "integer is greater than maximum") - v = chr(i) - self.prec = None - return self.std_wp(v) - - -format_registry = { - 'd':IntFormatter, - 'i':IntFormatter, - 'o':OctFormatter, - 'u':IntFormatter, - 'x':HexFormatter, - 'X':HexFormatter, - 'e':FloatEFormatter, - 'E':FloatEFormatter, - 'f':FloatFFormatter, - 'F':FloatFFormatter, - 'g':FloatGFormatter, - 'G':FloatGFormatter, - 'c':CharFormatter, - 's':funcFormatter(str), - 'r':funcFormatter(repr), - # this *can* get accessed, by e.g. '%()4%'%{'':1}. - # The usual %% case has to be handled specially as it - # doesn't consume a value. - '%':funcFormatter(lambda x:'%'), - } - - -class FmtIter(object): - def __init__(self, fmt): - self.fmt = fmt - self.i = 0 - - def __iter__(self): - return self - - def next(self): - try: - c = self.fmt[self.i] - except IndexError: - raise StopIteration - self.i += 1 - return c - - def skip_to_fmt(self): - i = self.i - j = self.fmt.find('%', i) - if j < 0: - self.i = len(self.fmt) - return self.fmt[i:] - else: - self.i = j - return self.fmt[i:j] - - -def format(fmt, values, valuedict=None): - fmtiter = FmtIter(fmt) - valueiter = iter(values) - r = [] - try: - for c in fmtiter: - if c == '%': - t = parse_fmt(fmtiter, valueiter, valuedict) - try: - f = format_registry[t[0]] - except KeyError: - raise ValueError("unsupported format character " - "'%s' (%x) at index %d" - %(t[0], ord(t[0]), fmtiter.i)) - # Trying to translate this using the flow space. 
- # Currently, star args give a problem there, - # so let's be explicit about the args: - # r.append(f(*t).format()) - char, flags, width, prec, value = t - r.append(f(char, flags, width, prec, value).format()) - else: - # efficiency hack: - r.append(c + fmtiter.skip_to_fmt()) - except StopIteration: - raise ValueError, "incomplete format" - try: - valueiter.next() - except StopIteration: - pass - else: - if valuedict is None: - raise TypeError('not all arguments converted ' - 'during string formatting') - return ''.join(r) - diff --git a/pypy/appspace/_types.py b/pypy/appspace/_types.py deleted file mode 100644 index 4b7975f876..0000000000 --- a/pypy/appspace/_types.py +++ /dev/null @@ -1,551 +0,0 @@ -""" -Definition of the standard Python types. -""" - -# XXX we can't do this import yet -from sys import pypy -import __builtin__ -import _types - -__all__ = ['BooleanType', 'BufferType', 'BuiltinFunctionType', - 'BuiltinMethodType', 'ClassType', 'CodeType', 'ComplexType', - 'DictProxyType', 'DictType', 'DictionaryType', 'EllipsisType', - 'FileType', 'FloatType', 'FrameType', 'FunctionType', - 'GeneratorType', 'InstanceType', 'IntType', 'LambdaType', - 'ListType', 'LongType', 'MethodType', 'ModuleType', 'NoneType', - 'NotImplementedType', 'ObjectType', 'SliceType', 'StringType', - 'TracebackType', 'TupleType', 'TypeType', 'UnboundMethodType', - 'UnicodeType', 'XRangeType'] - - -def _register(factory, cls, in_builtin=True, synonym=True): - """ - Register factory as type cls. - - If in_builtin is a true value (which is the default), also - register the type as a built-in. If the value of in_builtin - is a string, use this name as the type name in the __builtin__ - module. - - If synonym is true (which is the default), also register the - type in this very module under its synonym. If synonym is a - string, use this string, else uppercase the class name and - append the string "Type". 
- """ - pypy.registertype(factory, cls) - if in_builtin: - if isinstance(in_builtin, str): - typename = in_builtin - else: - typename = cls.__name__ - setattr(__builtin__, typename, cls) - if synonym: - if isinstance(synonym, str): - typename = synonym - else: - typename = cls.__name__.title() + 'Type' - setattr(_types, typename, cls) - - -class object: - - def __new__(cls): - if cls is object: - return pypy.ObjectFactory() - else: - return pypy.UserObjectFactory(cls, pypy.ObjectFactory) - - def __repr__(self): - return '<%s object at 0x%x>' % (type(self).__name__, id(self)) - -_register(pypy.ObjectFactory, object) - - -class bool(int): - - def __new__(cls, *args): - if cls is bool: - return pypy.BoolObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.BoolObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.BoolObjectFactory, bool, synonym='BooleanType') - - -class buffer(object): - - def __new__(cls, *args): - if cls is buffer: - return pypy.BufferObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.BufferObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.BufferObjectFactory, buffer) - - -class builtin_function_or_method(object): - - def __new__(cls, *args): - if cls is builtin_function_or_method: - return pypy.Builtin_Function_Or_MethodObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, - pypy.Builtin_Function_Or_MethodObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.Builtin_Function_Or_MethodObjectFactory, - builtin_function_or_method, in_builtin=False, - synonym='BuiltinFunctionType') - -setattr(_types, 'BuiltinMethodType', builtin_function_or_method) - - -class classobj(object): - - def __new__(cls, *args): - if cls is classobj: - return pypy.ClassobjObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.ClassobjObjectFactory, args) - - def __repr__(self): - return str(self) - 
-_register(pypy.ClassobjObjectFactory, classobj, in_builtin=False, - synonym='ClassType') - - -class code(object): - - def __new__(cls, *args): - if cls is code: - return pypy.CodeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.CodeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.CodeObjectFactory, code, in_builtin=False) - - -class complex(object): - - def __new__(cls, *args): - if cls is complex: - return pypy.ComplexObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.ComplexObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.ComplexObjectFactory, complex) - - -class dictproxy(object): - - def __new__(cls, *args): - if cls is dictproxy: - return pypy.DictproxyObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.DictproxyObjectFactory, - args) - - def __repr__(self): - return str(self) - -_register(pypy.DictproxyObjectFactory, dictproxy, in_builtin=False, - synonym='DictProxyType') - - -class dict(object): - - def __new__(cls, *args): - if cls is dict: - return pypy.DictObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.DictObjectFactory, args) - - def __repr__(self): - return str(self) - -# add to dict all and only those methods that used to be defined in -# objspace/std/dicttype.py, but get the implementations from the -# excellent code in UserDict.DictMixin instead (no inheritance for -# two reasons: we could not control what methods we get, and we do -# not want DictMixin among user-visible __bases__). 
-import UserDict -for attribute in ('update popitem get setdefault pop' - 'iteritems iterkeys itervalues').split(): - setattr(dict, attribute, UserDict.DictMixin.__dict__[attribute]) - -_register(pypy.DictObjectFactory, dict) - -setattr(_types, 'DictionaryType', dict) - - -class ellipsis(object): - - def __new__(cls, *args): - if cls is ellipsis: - return pypy.EllipsisObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.EllipsisObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.EllipsisObjectFactory, ellipsis, in_builtin=False) - - -class file(object): - - def __new__(cls, *args): - if cls is file: - return pypy.FileObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.FileObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.FileObjectFactory, file, in_builtin='open') - - -# XXX: for float as for all other classes, __new__ defers to a suitable -# factory from pypy; in floattype.py, the implementation of __new__ was -# quite differently, but we think that was not needed -- remove this -# comment once this is definitively established. 
-# NB: other rich implementations of __new__ from old *type.py modules -# not reproduced here: int, slice, str, tuple, type -class float(object): - - def __new__(cls, *args): - if cls is float: - return pypy.FloatObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.FloatObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.FloatObjectFactory, float) - - -class frame(object): - - def __new__(cls, *args): - if cls is frame: - return pypy.FrameObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.FrameObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.FrameObjectFactory, frame, in_builtin=False) - - -class function(object): - - def __new__(cls, *args): - if cls is function: - return pypy.FunctionObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.FunctionObjectFactory, args) - - def __repr__(self): - return str(self) - - # XXX find out details for builtin properties - func_code = pypy.builtin_property('fix') - func_globals = pypy.builtin_property('me') - -_register(pypy.FunctionObjectFactory, function, in_builtin=False) - - -class generator(object): - - def __new__(cls, *args): - if cls is generator: - return pypy.GeneratorObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.GeneratorObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.GeneratorObjectFactory, generator, in_builtin=False) - - -class instance(object): - - def __new__(cls, *args): - if cls is instance: - return pypy.InstanceObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.InstanceObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.InstanceObjectFactory, instance, in_builtin=False) - - -class int(object): - - def __new__(cls, *args): - if cls is int: - return pypy.IntObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.IntObjectFactory, args) - - def __repr__(self): - return str(self) 
- -_register(pypy.IntObjectFactory, int) - -setattr(_types, 'LambdaType', function) - - -class list(object): - - def __new__(cls, *args): - if cls is list: - return pypy.ListObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.ListObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.ListObjectFactory, list) - - -class long(object): - - def __new__(cls, *args): - if cls is long: - return pypy.LongObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.LongObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.LongObjectFactory, long) - - -class instancemethod(object): - - def __new__(cls, *args): - if cls is instancemethod: - return pypy.InstancemethodObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, - pypy.InstancemethodObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.InstancemethodObjectFactory, instancemethod, in_builtin=False, - synonym='MethodType') - - -class module(object): - - def __new__(cls, *args): - if cls is module: - return pypy.ModuleObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.ModuleObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.ModuleObjectFactory, module, in_builtin=False) - - -class NoneType(object): - - def __new__(cls, *args): - if cls is NoneType: - return pypy.NonetypeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.NonetypeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.NonetypeObjectFactory, NoneType, in_builtin=False, - synonym='NoneType') - - -class NotImplementedType(object): - - def __new__(cls, *args): - if cls is NotImplementedType: - return pypy.NotimplementedtypeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, - pypy.NotimplementedtypeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.NotimplementedtypeObjectFactory, NotImplementedType, - 
in_builtin=False, synonym='NotImplementedType') - - -class slice(object): - - def __new__(cls, *args): - if cls is slice: - return pypy.SliceObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.SliceObjectFactory, args) - - def __repr__(self): - return str(self) - - def indices(self, length): - step = self.step - if step is None: - step = 1 - elif step == 0: - raise ValueError, "slice step cannot be zero" - if step < 0: - defstart = length - 1 - defstop = -1 - else: - defstart = 0 - defstop = length - - start = self.start - if start is None: - start = defstart - else: - if start < 0: - start += length - if start < 0: - if step < 0: - start = -1 - else: - start = 0 - elif start >= length: - if step < 0: - start = length - 1 - else: - start = length - - stop = self.stop - if stop is None: - stop = defstop - else: - if stop < 0: - stop += length - if stop < 0: - stop = -1 - elif stop > length: - stop = length - - return start, stop, step - -_register(pypy.SliceObjectFactory, slice) - - -class str(object): - - def __new__(cls, *args): - if cls is str: - return pypy.StrObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.StrObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.StrObjectFactory, str, synonym='StringType') - - -class traceback(object): - - def __new__(cls, *args): - if cls is traceback: - return pypy.TracebackObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.TracebackObjectFactory, - args) - - def __repr__(self): - return str(self) - -_register(pypy.TracebackObjectFactory, traceback, in_builtin=False) - - -class tuple(object): - - def __new__(cls, *args): - if cls is tuple: - return pypy.TupleObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.TupleObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.TupleObjectFactory, tuple) - - -class type(object): - - def __new__(cls, *args): - if cls is type: - return 
pypy.TypeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.TypeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.TypeObjectFactory, type) - -setattr(_types, 'UnboundMethodType', instancemethod) - - -class unicode(object): - - def __new__(cls, *args): - if cls is unicode: - return pypy.UnicodeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.UnicodeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.UnicodeObjectFactory, unicode) - - -class xrange(object): - - def __new__(cls, *args): - if cls is xrange: - return pypy.XrangeObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.XrangeObjectFactory, args) - - def __repr__(self): - return str(self) - -_register(pypy.XrangeObjectFactory, xrange, synonym='XRangeType') - diff --git a/pypy/appspace/autopath.py b/pypy/appspace/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/appspace/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/appspace/cStringIO.py b/pypy/appspace/cStringIO.py deleted file mode 100644 index 246db9aa2c..0000000000 --- a/pypy/appspace/cStringIO.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# One-liner implementation of cStringIO -# - -from StringIO import * diff --git a/pypy/appspace/cmathmodule.py b/pypy/appspace/cmathmodule.py deleted file mode 100644 index f887206ccc..0000000000 --- a/pypy/appspace/cmathmodule.py +++ /dev/null @@ -1,215 +0,0 @@ -"""This module is always available. It provides access to mathematical -functions for complex numbers.""" - -# Complex math module - -# much code borrowed from mathmodule.c - -import math - -M_PI = 3.141592653589793239 - - - -# constants -_one = complex(1., 0.) -_half = complex(0.5, 0.) -_i = complex(0., 1.) 
-_halfi = complex(0., 0.5) - - -# internal function not available from Python -def _prodi(x): - real = -x.imag - imag = x.real - return complex(real, imag) - - -def acos(x): - """acos(x) - - Return the arc cosine of x.""" - - return -(_prodi(log((x+(_i*sqrt((_one-(x*x)))))))) - - -def acosh(x): - """acosh(x) - - Return the hyperbolic arccosine of x.""" - - z = complex() - z = sqrt(_half) - z = log(z*(sqrt(x+_one)+sqrt(x-_one))) - return z+z - - -def asin(x): - """asin(x) - - Return the arc sine of x.""" - - # -i * log[(sqrt(1-x**2) + i*x] - squared = x*x - sqrt_1_minus_x_sq = sqrt(_one-squared) - return -(_prodi(log((sqrt_1_minus_x_sq+_prodi(x))))) - - -def asinh(x): - """asinh(x) - - Return the hyperbolic arc sine of x.""" - - z = complex() - z = sqrt(_half) - z = log((z * (sqrt(x+_i)+sqrt((x-_i))) )) - return z+z - - -def atan(x): - """atan(x) - - Return the arc tangent of x.""" - - return _halfi*log(((_i+x)/(_i-x))) - - -def atanh(x): - """atanh(x) - - Return the hyperbolic arc tangent of x.""" - - return _half*log((_one+x)/(_one-x)) - - -def cos(x): - """cos(x) - - Return the cosine of x.""" - - real = math.cos(x.real) * math.cosh(x.imag) - imag = -math.sin(x.real) * math.sinh(x.imag) - return complex(real, imag) - - -def cosh(x): - """cosh(x) - - Return the hyperbolic cosine of x.""" - - real = math.cos(x.imag) * math.cosh(x.real) - imag = math.sin(x.imag) * math.sinh(x.real) - return complex(real, imag) - - -def exp(x): - """exp(x) - - Return the exponential value e**x.""" - - l = math.exp(x.real) - real = l * math.cos(x.imag) - imag = l * math.sin(x.imag) - return complex(real, imag) - - -def log(x): - """log(x) - - Return the natural logarithm of x.""" - - l = math.hypot(x.real,x.imag) - imag = math.atan2(x.imag, x.real) - real = math.log(l) - return complex(real, imag) - - -def log10(x): - """log10(x) - - Return the base-10 logarithm of x.""" - - l = math.hypot(x.real, x.imag) - imag = math.atan2(x.imag, x.real)/math.log(10.) 
- real = math.log10(l) - return complex(real, imag) - - -def sin(x): - """sin(x) - - Return the sine of x.""" - - real = math.sin(x.real) * math.cosh(x.imag) - imag = math.cos(x.real) * math.sinh(x.imag) - return complex(real, imag) - - -def sinh(x): - """sinh(x) - - Return the hyperbolic sine of x.""" - - real = math.cos(x.imag) * math.sinh(x.real) - imag = math.sin(x.imag) * math.cosh(x.real) - return complex(real, imag) - - -def sqrt(x): - """sqrt(x) - - Return the square root of x.""" - - if x.real == 0. and x.imag == 0.: - real, imag = 0, 0 - else: - s = math.sqrt(0.5*(math.fabs(x.real) + math.hypot(x.real,x.imag))) - d = 0.5*x.imag/s - if x.real > 0.: - real = s - imag = d - elif x.imag >= 0.: - real = d - imag = s - else: - real = -d - imag = -s - return complex(real, imag) - - -def tan(x): - """tan(x) - - Return the tangent of x.""" - - sr = math.sin(x.real) - cr = math.cos(x.real) - shi = math.sinh(x.imag) - chi = math.cosh(x.imag) - rs = sr * chi - is_ = cr * shi - rc = cr * chi - ic = -sr * shi - d = rc*rc + ic * ic - real = (rs*rc + is_*ic) / d - imag = (is_*rc - rs*ic) / d - return complex(real, imag) - - -def tanh(x): - """tanh(x) - - Return the hyperbolic tangent of x.""" - - si = math.sin(x.imag) - ci = math.cos(x.imag) - shr = math.sinh(x.real) - chr = math.cosh(x.real) - rs = ci * shr - is_ = si * chr - rc = ci * chr - ic = si * shr - d = rc*rc + ic*ic - real = (rs*rc + is_*ic) / d - imag = (is_*rc - rs*ic) / d - return complex(real, imag) diff --git a/pypy/appspace/copy_reg.py b/pypy/appspace/copy_reg.py deleted file mode 100644 index ae61655903..0000000000 --- a/pypy/appspace/copy_reg.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Helper to provide extensibility for pickle/cPickle. - -!! This file has been copied practicaly verbatim from the CPython source. -!! See http://www.python.org/2.3.2/license.html for licensing info. - -This is only useful to add pickle support for extension types defined in -C, not for instances of user-defined classes. 
-""" - -from types import ClassType as _ClassType - -__all__ = ["pickle", "constructor", - "add_extension", "remove_extension", "clear_extension_cache"] - -dispatch_table = {} - -def pickle(ob_type, pickle_function, constructor_ob=None): -## if type(ob_type) is _ClassType: -## raise TypeError("copy_reg is not intended for use with classes") - - if not callable(pickle_function): - raise TypeError("reduction functions must be callable") - dispatch_table[ob_type] = pickle_function - - # The constructor_ob function is a vestige of safe for unpickling. - # There is no reason for the caller to pass it anymore. - if constructor_ob is not None: - constructor(constructor_ob) - -def constructor(object): - if not callable(object): - raise TypeError("constructors must be callable") - -# Example: provide pickling support for complex numbers. - -try: - complex -except NameError: - pass -else: - - def pickle_complex(c): - return complex, (c.real, c.imag) - - pickle(complex, pickle_complex, complex) - -# Support for pickling new-style objects - -def _reconstructor(cls, base, state): - if base is object: - obj = object.__new__(cls) - else: - obj = base.__new__(cls, state) - base.__init__(obj, state) - return obj - -_HEAPTYPE = 1<<9 - -# Python code for object.__reduce_ex__ for protocols 0 and 1 - -def _reduce_ex(self, proto): - assert proto < 2 - for base in self.__class__.__mro__: - if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: - break - else: - base = object # not really reachable - if base is object: - state = None - else: - if base is self.__class__: - raise TypeError, "can't pickle %s objects" % base.__name__ - state = base(self) - args = (self.__class__, base, state) - try: - getstate = self.__getstate__ - except AttributeError: - if getattr(self, "__slots__", None): - raise TypeError("a class that defines __slots__ without " - "defining __getstate__ cannot be pickled") - try: - dict = self.__dict__ - except AttributeError: - dict = None - else: - dict = 
getstate() - if dict: - return _reconstructor, args, dict - else: - return _reconstructor, args - -# Helper for __reduce_ex__ protocol 2 - -def __newobj__(cls, *args): - return cls.__new__(cls, *args) - -def _slotnames(cls): - """Return a list of slot names for a given class. - - This needs to find slots defined by the class and its bases, so we - can't simply return the __slots__ attribute. We must walk down - the Method Resolution Order and concatenate the __slots__ of each - class found there. (This assumes classes don't modify their - __slots__ attribute to misrepresent their slots after the class is - defined.) - """ - - # Get the value from a cache in the class if possible - names = cls.__dict__.get("__slotnames__") - if names is not None: - return names - - # Not cached -- calculate the value - names = [] - if not hasattr(cls, "__slots__"): - # This class has no slots - pass - else: - # Slots found -- gather slot names from all base classes - for c in cls.__mro__: - if "__slots__" in c.__dict__: - names += [name for name in c.__dict__["__slots__"] - if name not in ("__dict__", "__weakref__")] - - # Cache the outcome in the class if at all possible - try: - cls.__slotnames__ = names - except: - pass # But don't die if we can't - - return names - -# A registry of extension codes. This is an ad-hoc compression -# mechanism. Whenever a global reference to <module>, <name> is about -# to be pickled, the (<module>, <name>) tuple is looked up here to see -# if it is a registered extension code for it. Extension codes are -# universal, so that the meaning of a pickle does not depend on -# context. (There are also some codes reserved for local use that -# don't have this restriction.) Codes are positive ints; 0 is -# reserved. - -_extension_registry = {} # key -> code -_inverted_registry = {} # code -> key -_extension_cache = {} # code -> object -# Don't ever rebind those names: cPickle grabs a reference to them when -# it's initialized, and won't see a rebinding. 
- -def add_extension(module, name, code): - """Register an extension code.""" - code = int(code) - if not 1 <= code <= 0x7fffffff: - raise ValueError, "code out of range" - key = (module, name) - if (_extension_registry.get(key) == code and - _inverted_registry.get(code) == key): - return # Redundant registrations are benign - if key in _extension_registry: - raise ValueError("key %s is already registered with code %s" % - (key, _extension_registry[key])) - if code in _inverted_registry: - raise ValueError("code %s is already in use for key %s" % - (code, _inverted_registry[code])) - _extension_registry[key] = code - _inverted_registry[code] = key - -def remove_extension(module, name, code): - """Unregister an extension code. For testing only.""" - key = (module, name) - if (_extension_registry.get(key) != code or - _inverted_registry.get(code) != key): - raise ValueError("key %s is not registered with code %s" % - (key, code)) - del _extension_registry[key] - del _inverted_registry[code] - if code in _extension_cache: - del _extension_cache[code] - -def clear_extension_cache(): - _extension_cache.clear() - -# Standard extension code assignments - -# Reserved ranges - -# First Last Count Purpose -# 1 127 127 Reserved for Python standard library -# 128 191 64 Reserved for Zope -# 192 239 48 Reserved for 3rd parties -# 240 255 16 Reserved for private use (will never be assigned) -# 256 Inf Inf Reserved for future assignment - -# Extension codes are assigned by the Python Software Foundation. 
diff --git a/pypy/appspace/exceptions.py b/pypy/appspace/exceptions.py deleted file mode 100644 index b7da4142bd..0000000000 --- a/pypy/appspace/exceptions.py +++ /dev/null @@ -1,43 +0,0 @@ -ArithmeticError = ArithmeticError -AssertionError = AssertionError -AttributeError = AttributeError -DeprecationWarning = DeprecationWarning -EOFError = EOFError -EnvironmentError = EnvironmentError -Exception = Exception -FloatingPointError = FloatingPointError -FutureWarning = FutureWarning -IOError = IOError -ImportError = ImportError -IndentationError = IndentationError -IndexError = IndexError -KeyError = KeyError -KeyboardInterrupt = KeyboardInterrupt -LookupError = LookupError -MemoryError = MemoryError -NameError = NameError -NotImplementedError = NotImplementedError -OSError = OSError -OverflowError = OverflowError -OverflowWarning = OverflowWarning -PendingDeprecationWarning = PendingDeprecationWarning -ReferenceError = ReferenceError -RuntimeError = RuntimeError -RuntimeWarning = RuntimeWarning -StandardError = StandardError -StopIteration = StopIteration -SyntaxError = SyntaxError -SyntaxWarning = SyntaxWarning -SystemError = SystemError -SystemExit = SystemExit -TabError = TabError -TypeError = TypeError -UnboundLocalError = UnboundLocalError -UnicodeDecodeError = UnicodeDecodeError -UnicodeEncodeError = UnicodeEncodeError -UnicodeError = UnicodeError -UnicodeTranslateError = UnicodeTranslateError -UserWarning = UserWarning -ValueError = ValueError -Warning = Warning -ZeroDivisionError = ZeroDivisionError diff --git a/pypy/appspace/imp.py b/pypy/appspace/imp.py deleted file mode 100644 index fa1d883eda..0000000000 --- a/pypy/appspace/imp.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -This module provides the components needed to build your own -__import__ function. Undocumented functions are obsolete. 
-""" - -# XXX partial implementation - -# XXX to be reviewed - -import sys, os - -PY_SOURCE = 1 -PKG_DIRECTORY = 5 -C_BUILTIN = 6 - -def get_magic(): - return '\x3b\xf2\x0d\x0a' - -def get_suffixes(): - return [('.py', 'U', PY_SOURCE)] - -new_module = type(sys) - - -def find_module(name, path=None): - if path is None: - if name in sys.builtin_module_names: - return (None, name, ('', '', C_BUILTIN)) - path = sys.path - for base in path: - filename = os.path.join(base, name) - if os.path.isdir(filename): - return (None, filename, ('', '', PKG_DIRECTORY)) - filename += '.py' - if os.path.exists(filename): - return (file(filename, 'U'), filename, ('.py', 'U', PY_SOURCE)) - raise ImportError, 'No module named %s' % (name,) - - -def load_module(name, file, filename, description): - suffix, mode, type = description - module = sys.modules.get(name) - - if type == PY_SOURCE: - source = file.read() - co = compile(source, filename, 'exec') - if module is None: - sys.modules[name] = module = new_module(name) - module.__dict__.clear() - module.__name__ = name - module.__doc__ = None - module.__file__ = filename - exec co in module.__dict__ - return module - - if type == PKG_DIRECTORY: - initfilename = os.path.join(filename, '__init__.py') - if module is None: - sys.modules[name] = module = new_module(name) - module.__dict__.clear() - module.__name__ = name - module.__doc__ = None - module.__file__ = initfilename - module.__path__ = [filename] - execfile(initfilename, module.__dict__) - return module - - if type == C_BUILTIN: - module = __import__(name, {}, {}, None) - return module - - raise ValueError, 'invalid description argument: %r' % (description,) diff --git a/pypy/appspace/md5.py b/pypy/appspace/md5.py deleted file mode 100644 index 4868314a96..0000000000 --- a/pypy/appspace/md5.py +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/env python -# -*- coding: iso-8859-1 - -"""A sample implementation of MD5 in pure Python. 
- -This is an implementation of the MD5 hash function, as specified by -RFC 1321, in pure Python. It was implemented using Bruce Schneier's -excellent book "Applied Cryptography", 2nd ed., 1996. - -Surely this is not meant to compete with the existing implementation -of the Python standard library (written in C). Rather, it should be -seen as a Python complement that is more readable than C and can be -used more conveniently for learning and experimenting purposes in -the field of cryptography. - -This module tries very hard to follow the API of the existing Python -standard library's "md5" module, but although it seems to work fine, -it has not been extensively tested! (But note that there is a test -module, test_md5py.py, that compares this Python implementation with -the C one of the Python standard library. - -BEWARE: this comes with no guarantee whatsoever about fitness and/or -other properties! Specifically, do not use this in any production -code! License is Python License! - -Special thanks to Aurelian Coman who fixed some nasty bugs! - -Dinu C. Gherman -""" - - -__date__ = '2004-11-17' -__version__ = 0.91 # Modernised by J. Hallén and L. Creighton for Pypy - -__metaclass__ = type # or genrpy won't work - -import struct, copy - - -# ====================================================================== -# Bit-Manipulation helpers -# -# _long2bytes() was contributed by Barry Warsaw -# and is reused here with tiny modifications. -# ====================================================================== - -def _long2bytes(n, blocksize=0): - """Convert a long integer to a byte string. - - If optional blocksize is given and greater than zero, pad the front - of the byte string with binary zeros so that the length is a multiple - of blocksize. - """ - - # After much testing, this algorithm was deemed to be the fastest. - s = '' - pack = struct.pack - while n > 0: - ### CHANGED FROM '>I' TO '<I'. 
(DCG) - s = pack('<I', n & 0xffffffffL) + s - ### -------------------------- - n = n >> 32 - - # Strip off leading zeros. - for i in range(len(s)): - if s[i] <> '\000': - break - else: - # Only happens when n == 0. - s = '\000' - i = 0 - - s = s[i:] - - # Add back some pad bytes. This could be done more efficiently - # w.r.t. the de-padding being done above, but sigh... - if blocksize > 0 and len(s) % blocksize: - s = (blocksize - len(s) % blocksize) * '\000' + s - - return s - - -def _bytelist2long(list): - "Transform a list of characters into a list of longs." - - imax = len(list)/4 - hl = [0L] * imax - - j = 0 - i = 0 - while i < imax: - b0 = long(ord(list[j])) - b1 = (long(ord(list[j+1]))) << 8 - b2 = (long(ord(list[j+2]))) << 16 - b3 = (long(ord(list[j+3]))) << 24 - hl[i] = b0 | b1 |b2 | b3 - i = i+1 - j = j+4 - - return hl - - -def _rotateLeft(x, n): - "Rotate x (32 bit) left n bits circularly." - - return (x << n) | (x >> (32-n)) - - -# ====================================================================== -# The real MD5 meat... -# -# Implemented after "Applied Cryptography", 2nd ed., 1996, -# pp. 436-441 by Bruce Schneier. -# ====================================================================== - -# F, G, H and I are basic MD5 functions. - -def F(x, y, z): - return (x & y) | ((~x) & z) - -def G(x, y, z): - return (x & z) | (y & (~z)) - -def H(x, y, z): - return x ^ y ^ z - -def I(x, y, z): - return y ^ (x | (~z)) - - -def XX(func, a, b, c, d, x, s, ac): - """Wrapper for call distribution to functions F, G, H and I. - - This replaces functions FF, GG, HH and II from "Appl. Crypto." - Rotation is separate from addition to prevent recomputation - (now summed-up in one function). 
- """ - - res = 0L - res = res + a + func(b, c, d) - res = res + x - res = res + ac - res = res & 0xffffffffL - res = _rotateLeft(res, s) - res = res & 0xffffffffL - res = res + b - - return res & 0xffffffffL - - -class MD5: - "An implementation of the MD5 hash function in pure Python." - - def __init__(self): - "Initialisation." - - # Initial message length in bits(!). - self.length = 0L - self.count = [0, 0] - - # Initial empty message as a sequence of bytes (8 bit characters). - self.input = [] - - # Call a separate init function, that can be used repeatedly - # to start from scratch on the same object. - self.init() - - - def init(self): - "Initialize the message-digest and set all fields to zero." - - self.length = 0L - self.count = [0, 0] - self.input = [] - - # Load magic initialization constants. - self.A = 0x67452301L - self.B = 0xefcdab89L - self.C = 0x98badcfeL - self.D = 0x10325476L - - - def _transform(self, inp): - """Basic MD5 step transforming the digest based on the input. - - Note that if the Mysterious Constants are arranged backwards - in little-endian order and decrypted with the DES they produce - OCCULT MESSAGES! - """ - - a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D - - # Round 1. 
- - S11, S12, S13, S14 = 7, 12, 17, 22 - - a = XX(F, a, b, c, d, inp[ 0], S11, 0xD76AA478L) # 1 - d = XX(F, d, a, b, c, inp[ 1], S12, 0xE8C7B756L) # 2 - c = XX(F, c, d, a, b, inp[ 2], S13, 0x242070DBL) # 3 - b = XX(F, b, c, d, a, inp[ 3], S14, 0xC1BDCEEEL) # 4 - a = XX(F, a, b, c, d, inp[ 4], S11, 0xF57C0FAFL) # 5 - d = XX(F, d, a, b, c, inp[ 5], S12, 0x4787C62AL) # 6 - c = XX(F, c, d, a, b, inp[ 6], S13, 0xA8304613L) # 7 - b = XX(F, b, c, d, a, inp[ 7], S14, 0xFD469501L) # 8 - a = XX(F, a, b, c, d, inp[ 8], S11, 0x698098D8L) # 9 - d = XX(F, d, a, b, c, inp[ 9], S12, 0x8B44F7AFL) # 10 - c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) # 11 - b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) # 12 - a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) # 13 - d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) # 14 - c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) # 15 - b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) # 16 - - # Round 2. - - S21, S22, S23, S24 = 5, 9, 14, 20 - - a = XX(G, a, b, c, d, inp[ 1], S21, 0xF61E2562L) # 17 - d = XX(G, d, a, b, c, inp[ 6], S22, 0xC040B340L) # 18 - c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) # 19 - b = XX(G, b, c, d, a, inp[ 0], S24, 0xE9B6C7AAL) # 20 - a = XX(G, a, b, c, d, inp[ 5], S21, 0xD62F105DL) # 21 - d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) # 22 - c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) # 23 - b = XX(G, b, c, d, a, inp[ 4], S24, 0xE7D3FBC8L) # 24 - a = XX(G, a, b, c, d, inp[ 9], S21, 0x21E1CDE6L) # 25 - d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) # 26 - c = XX(G, c, d, a, b, inp[ 3], S23, 0xF4D50D87L) # 27 - b = XX(G, b, c, d, a, inp[ 8], S24, 0x455A14EDL) # 28 - a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) # 29 - d = XX(G, d, a, b, c, inp[ 2], S22, 0xFCEFA3F8L) # 30 - c = XX(G, c, d, a, b, inp[ 7], S23, 0x676F02D9L) # 31 - b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) # 32 - - # Round 3. 
- - S31, S32, S33, S34 = 4, 11, 16, 23 - - a = XX(H, a, b, c, d, inp[ 5], S31, 0xFFFA3942L) # 33 - d = XX(H, d, a, b, c, inp[ 8], S32, 0x8771F681L) # 34 - c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) # 35 - b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) # 36 - a = XX(H, a, b, c, d, inp[ 1], S31, 0xA4BEEA44L) # 37 - d = XX(H, d, a, b, c, inp[ 4], S32, 0x4BDECFA9L) # 38 - c = XX(H, c, d, a, b, inp[ 7], S33, 0xF6BB4B60L) # 39 - b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) # 40 - a = XX(H, a, b, c, d, inp[13], S31, 0x289B7EC6L) # 41 - d = XX(H, d, a, b, c, inp[ 0], S32, 0xEAA127FAL) # 42 - c = XX(H, c, d, a, b, inp[ 3], S33, 0xD4EF3085L) # 43 - b = XX(H, b, c, d, a, inp[ 6], S34, 0x04881D05L) # 44 - a = XX(H, a, b, c, d, inp[ 9], S31, 0xD9D4D039L) # 45 - d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) # 46 - c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) # 47 - b = XX(H, b, c, d, a, inp[ 2], S34, 0xC4AC5665L) # 48 - - # Round 4. - - S41, S42, S43, S44 = 6, 10, 15, 21 - - a = XX(I, a, b, c, d, inp[ 0], S41, 0xF4292244L) # 49 - d = XX(I, d, a, b, c, inp[ 7], S42, 0x432AFF97L) # 50 - c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) # 51 - b = XX(I, b, c, d, a, inp[ 5], S44, 0xFC93A039L) # 52 - a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) # 53 - d = XX(I, d, a, b, c, inp[ 3], S42, 0x8F0CCC92L) # 54 - c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) # 55 - b = XX(I, b, c, d, a, inp[ 1], S44, 0x85845DD1L) # 56 - a = XX(I, a, b, c, d, inp[ 8], S41, 0x6FA87E4FL) # 57 - d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) # 58 - c = XX(I, c, d, a, b, inp[ 6], S43, 0xA3014314L) # 59 - b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) # 60 - a = XX(I, a, b, c, d, inp[ 4], S41, 0xF7537E82L) # 61 - d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) # 62 - c = XX(I, c, d, a, b, inp[ 2], S43, 0x2AD7D2BBL) # 63 - b = XX(I, b, c, d, a, inp[ 9], S44, 0xEB86D391L) # 64 - - A = (A + a) & 0xffffffffL - B = (B + b) & 0xffffffffL - C = (C + c) & 0xffffffffL - D = (D + d) & 
0xffffffffL - - self.A, self.B, self.C, self.D = A, B, C, D - - - # Down from here all methods follow the Python Standard Library - # API of the md5 module. - - def update(self, inBuf): - """Add to the current message. - - Update the md5 object with the string arg. Repeated calls - are equivalent to a single call with the concatenation of all - the arguments, i.e. m.update(a); m.update(b) is equivalent - to m.update(a+b). - - The hash is immediately calculated for all full blocks. The final - calculation is made in digest(). This allows us to keep an - intermediate value for the hash, so that we only need to make - minimal recalculation if we call update() to add moredata to - the hashed string. - """ - - leninBuf = long(len(inBuf)) - - # Compute number of bytes mod 64. - index = (self.count[0] >> 3) & 0x3FL - - # Update number of bits. - self.count[0] = self.count[0] + (leninBuf << 3) - if self.count[0] < (leninBuf << 3): - self.count[1] = self.count[1] + 1 - self.count[1] = self.count[1] + (leninBuf >> 29) - - partLen = 64 - index - - if leninBuf >= partLen: - self.input[index:] = list(inBuf[:partLen]) - self._transform(_bytelist2long(self.input)) - i = partLen - while i + 63 < leninBuf: - self._transform(_bytelist2long(list(inBuf[i:i+64]))) - i = i + 64 - else: - self.input = list(inBuf[i:leninBuf]) - else: - i = 0 - self.input = self.input + list(inBuf) - - - def digest(self): - """Terminate the message-digest computation and return digest. - - Return the digest of the strings passed to the update() - method so far. This is a 16-byte string which may contain - non-ASCII characters, including null bytes. - """ - - A = self.A - B = self.B - C = self.C - D = self.D - input = [] + self.input - count = [] + self.count - - index = (self.count[0] >> 3) & 0x3fL - - if index < 56: - padLen = 56 - index - else: - padLen = 120 - index - - padding = ['\200'] + ['\000'] * 63 - self.update(padding[:padLen]) - - # Append length (before padding). 
- bits = _bytelist2long(self.input[:56]) + count - - self._transform(bits) - - # Store state in digest. - digest = _long2bytes(self.A << 96, 16)[:4] + \ - _long2bytes(self.B << 64, 16)[4:8] + \ - _long2bytes(self.C << 32, 16)[8:12] + \ - _long2bytes(self.D, 16)[12:] - - self.A = A - self.B = B - self.C = C - self.D = D - self.input = input - self.count = count - - return digest - - - def hexdigest(self): - """Terminate and return digest in HEX form. - - Like digest() except the digest is returned as a string of - length 32, containing only hexadecimal digits. This may be - used to exchange the value safely in email or other non- - binary environments. - """ - - return ''.join(['%02x' % ord(c) for c in self.digest()]) - - def copy(self): - """Return a clone object. - - Return a copy ('clone') of the md5 object. This can be used - to efficiently compute the digests of strings that share - a common initial substring. - """ - if 0: # set this to 1 to make the flow space crash - return copy.deepcopy(self) - clone = self.__class__() - clone.length = self.length - clone.count = [] + self.count[:] - clone.input = [] + self.input - clone.A = self.A - clone.B = self.B - clone.C = self.C - clone.D = self.D - return clone - - -# ====================================================================== -# Mimic Python top-level functions from standard library API -# for consistency with the md5 module of the standard library. -# ====================================================================== - -digest_size = 16 - -def new(arg=None): - """Return a new md5 object. - - If arg is present, the method call update(arg) is made. - """ - - md5 = MD5() - if arg: - md5.update(arg) - - return md5 - - -def md5(arg=None): - """Same as new(). - - For backward compatibility reasons, this is an alternative - name for the new() function. 
- """ - - return new(arg) diff --git a/pypy/appspace/operator.py b/pypy/appspace/operator.py deleted file mode 100644 index f6286d7f06..0000000000 --- a/pypy/appspace/operator.py +++ /dev/null @@ -1,163 +0,0 @@ -def abs(obj,): - 'abs(a) -- Same as abs(a).' - return abs(obj) -__abs__ = abs -def add(obj1, obj2): - 'add(a, b) -- Same as a + b.' - return obj1 + obj2 -__add__ = add -def and_(obj1,obj2): - 'and_(a, b) -- Same as a & b.' - return obj1 & obj2 -__and__ = and_ -def concat(obj1, obj2): - 'concat(a, b) -- Same as a + b, for a and b sequences.' - return obj1 + obj2 # XXX -def contains(obj1,obj2): - 'contains(a, b) -- Same as b in a (note reversed operands).' - return obj2 in obj1 -__contains__ = contains -def countOf(a,b): - 'countOf(a, b) -- Return the number of times b occurs in a.' - raise NotImplementedError -def delitem(obj, key): - 'delitem(a, b) -- Same as del a[b].' - del obj[key] -__delitem__ = delitem -def delslice(obj, start, end): - 'delslice(a, b, c) -- Same as del a[b:c].' - del obj[start:end] -__delslice__ = delslice -def div(a,b): - 'div(a, b) -- Same as a / b when __future__.division is not in effect.' - return a / b -__div__ = div -def eq(a, b): - 'eq(a, b) -- Same as a==b.' - return a == b -__eq__ = eq -def floordiv(a, b): - 'floordiv(a, b) -- Same as a // b.' - return a // b -__floordiv__ = floordiv -def ge(a, b): - 'ge(a, b) -- Same as a>=b.' - return a >= b -__ge__ = ge -def getitem(a, b): - 'getitem(a, b) -- Same as a[b].' - return a[b] -__getitem__ = getitem -def getslice(a, start, end): - 'getslice(a, b, c) -- Same as a[b:c].' - return a[start:end] -__getslice__ = getslice -def gt(a,b): - 'gt(a, b) -- Same as a>b.' - return a > b -__gt__ = gt -def indexOf(a, b): - 'indexOf(a, b) -- Return the first index of b in a.' - raise NotImplementedError -def inv(obj,): - 'inv(a) -- Same as ~a.' - return ~obj -__inv__ = inv -def invert(obj,): - 'invert(a) -- Same as ~a.' 
- return ~obj -__invert__ = invert -def isCallable(obj,): - 'isCallable(a) -- Same as callable(a).' - return callable(obj) -def isMappingType(obj,): - 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - return hasattr(obj, '__getitem__') # Xxx only close -def isNumberType(obj,): - 'isNumberType(a) -- Return True if a has a numeric type, False otherwise.' - return hasattr(obj, '__int__') or hasattr(obj, '__float__') -def isSequenceType(obj,): - 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' - return hasattr(obj, '__getitem__') # Xxx only close -def is_(a, b): - 'is_(a, b) -- Same as a is b.' - return a is b -def is_not(a, b): - 'is_not(a, b) -- Same as a is not b.' - return a is not b -def le(a, b): - 'le(a, b) -- Same as a<=b.' - return a <= b -__le__ = le -def lshift(a, b): - 'lshift(a, b) -- Same as a << b.' - return a << b -__lshift__ = lshift -def lt(a, b): - 'lt(a, b) -- Same as a<b.' - return a < b -__lt__ = lt -def mod(a, b): - 'mod(a, b) -- Same as a % b.' - return a % b -__mod__ = mod -def mul(a, b): - 'mul(a, b) -- Same as a * b.' - return a * b -__mul__ = mul -def ne(a, b): - 'ne(a, b) -- Same as a!=b.' - return a != b -__ne__ = ne -def neg(obj,): - 'neg(a) -- Same as -a.' - return -a -__neg__ = neg -def not_(obj,): - 'not_(a) -- Same as not a.' - return not obj -def or_(a, b): - 'or_(a, b) -- Same as a | b.' - return a | b -__or__ = or_ -def pos(obj,): - 'pos(a) -- Same as +a.' - return +obj -__pos__ = pos -def pow(a, b): - 'pow(a, b) -- Same as a**b.' - return a ** b -__pow__ = pow -def repeat(obj, num): - 'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.' - return obj * num -def rshift(a, b): - 'rshift(a, b) -- Same as a >> b.' - return a >> b -__rshift__ = rshift -def sequenceIncludes(a, b): - 'sequenceIncludes(a, b) -- Same as b in a (note reversed operands; deprecated).' 
- raise NotImplementedError -def setitem(obj, key, value): - 'setitem(a, b, c) -- Same as a[b] = c.' - obj[key] = value -__setitem__ = setitem -def setslice(a, b, c, d): - 'setslice(a, b, c, d) -- Same as a[b:c] = d.' - a[b:c] = d -__setslice__ = setslice -def sub(a, b): - 'sub(a, b) -- Same as a - b.' - return a - b -__sub__ = sub -def truediv(a, b): - 'truediv(a, b) -- Same as a / b when __future__.division is in effect.' - return a / b -__truediv__ = truediv -def truth(a,): - 'truth(a) -- Return True if a is true, False otherwise.' - return not not a -def xor(a, b): - 'xor(a, b) -- Same as a ^ b.' - return a ^ b -__xor__ = xor diff --git a/pypy/appspace/pystone.py b/pypy/appspace/pystone.py deleted file mode 100644 index 168c399d02..0000000000 --- a/pypy/appspace/pystone.py +++ /dev/null @@ -1,267 +0,0 @@ -#! /usr/bin/env python - -""" -"PYSTONE" Benchmark Program - -Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes) - -Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013. - - Translated from ADA to C by Rick Richardson. - Every method to preserve ADA-likeness has been used, - at the expense of C-ness. - - Translated from C to Python by Guido van Rossum. - -Version History: - - Version 1.1 corrects two bugs in version 1.0: - - First, it leaked memory: in Proc1(), NextRecord ends - up having a pointer to itself. I have corrected this - by zapping NextRecord.PtrComp at the end of Proc1(). - - Second, Proc3() used the operator != to compare a - record to None. This is rather inefficient and not - true to the intention of the original benchmark (where - a pointer comparison to None is intended; the != - operator attempts to find a method __cmp__ to do value - comparison of the record). Version 1.1 runs 5-10 - percent faster than version 1.0, so benchmark figures - of different versions can't be compared directly. 
- -""" - -LOOPS = 50000 - -from time import clock - -__version__ = "1.1" - -[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6) - -class Record: - - def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0, - IntComp = 0, StringComp = 0): - self.PtrComp = PtrComp - self.Discr = Discr - self.EnumComp = EnumComp - self.IntComp = IntComp - self.StringComp = StringComp - - def copy(self): - return Record(self.PtrComp, self.Discr, self.EnumComp, - self.IntComp, self.StringComp) - -TRUE = 1 -FALSE = 0 - -def main(loops=LOOPS): - benchtime, stones = pystones(loops) - print "Pystone(%s) time for %d passes = %g" % \ - (__version__, loops, benchtime) - print "This machine benchmarks at %g pystones/second" % stones - - -def pystones(loops=LOOPS): - return Proc0(loops) - -IntGlob = 0 -BoolGlob = FALSE -Char1Glob = '\0' -Char2Glob = '\0' -Array1Glob = [0]*51 -Array2Glob = map(lambda x: x[:], [Array1Glob]*51) -PtrGlb = None -PtrGlbNext = None - -def Proc0(loops=LOOPS): - global IntGlob - global BoolGlob - global Char1Glob - global Char2Glob - global Array1Glob - global Array2Glob - global PtrGlb - global PtrGlbNext - - starttime = clock() - for i in range(loops): - pass - nulltime = clock() - starttime - - PtrGlbNext = Record() - PtrGlb = Record() - PtrGlb.PtrComp = PtrGlbNext - PtrGlb.Discr = Ident1 - PtrGlb.EnumComp = Ident3 - PtrGlb.IntComp = 40 - PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING" - String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING" - Array2Glob[8][7] = 10 - - starttime = clock() - - for i in range(loops): - Proc5() - Proc4() - IntLoc1 = 2 - IntLoc2 = 3 - String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING" - EnumLoc = Ident2 - BoolGlob = not Func2(String1Loc, String2Loc) - while IntLoc1 < IntLoc2: - IntLoc3 = 5 * IntLoc1 - IntLoc2 - IntLoc3 = Proc7(IntLoc1, IntLoc2) - IntLoc1 = IntLoc1 + 1 - Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3) - PtrGlb = Proc1(PtrGlb) - CharIndex = 'A' - while CharIndex <= Char2Glob: - if EnumLoc == Func1(CharIndex, 'C'): - EnumLoc = 
Proc6(Ident1) - CharIndex = chr(ord(CharIndex)+1) - IntLoc3 = IntLoc2 * IntLoc1 - IntLoc2 = IntLoc3 / IntLoc1 - IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1 - IntLoc1 = Proc2(IntLoc1) - - benchtime = clock() - starttime - nulltime - return benchtime, (loops / benchtime) - -def Proc1(PtrParIn): - PtrParIn.PtrComp = NextRecord = PtrGlb.copy() - PtrParIn.IntComp = 5 - NextRecord.IntComp = PtrParIn.IntComp - NextRecord.PtrComp = PtrParIn.PtrComp - NextRecord.PtrComp = Proc3(NextRecord.PtrComp) - if NextRecord.Discr == Ident1: - NextRecord.IntComp = 6 - NextRecord.EnumComp = Proc6(PtrParIn.EnumComp) - NextRecord.PtrComp = PtrGlb.PtrComp - NextRecord.IntComp = Proc7(NextRecord.IntComp, 10) - else: - PtrParIn = NextRecord.copy() - NextRecord.PtrComp = None - return PtrParIn - -def Proc2(IntParIO): - IntLoc = IntParIO + 10 - while 1: - if Char1Glob == 'A': - IntLoc = IntLoc - 1 - IntParIO = IntLoc - IntGlob - EnumLoc = Ident1 - if EnumLoc == Ident1: - break - return IntParIO - -def Proc3(PtrParOut): - global IntGlob - - if PtrGlb is not None: - PtrParOut = PtrGlb.PtrComp - else: - IntGlob = 100 - PtrGlb.IntComp = Proc7(10, IntGlob) - return PtrParOut - -def Proc4(): - global Char2Glob - - BoolLoc = Char1Glob == 'A' - BoolLoc = BoolLoc or BoolGlob - Char2Glob = 'B' - -def Proc5(): - global Char1Glob - global BoolGlob - - Char1Glob = 'A' - BoolGlob = FALSE - -def Proc6(EnumParIn): - EnumParOut = EnumParIn - if not Func3(EnumParIn): - EnumParOut = Ident4 - if EnumParIn == Ident1: - EnumParOut = Ident1 - elif EnumParIn == Ident2: - if IntGlob > 100: - EnumParOut = Ident1 - else: - EnumParOut = Ident4 - elif EnumParIn == Ident3: - EnumParOut = Ident2 - elif EnumParIn == Ident4: - pass - elif EnumParIn == Ident5: - EnumParOut = Ident3 - return EnumParOut - -def Proc7(IntParI1, IntParI2): - IntLoc = IntParI1 + 2 - IntParOut = IntParI2 + IntLoc - return IntParOut - -def Proc8(Array1Par, Array2Par, IntParI1, IntParI2): - global IntGlob - - IntLoc = IntParI1 + 5 - Array1Par[IntLoc] = 
IntParI2 - Array1Par[IntLoc+1] = Array1Par[IntLoc] - Array1Par[IntLoc+30] = IntLoc - for IntIndex in range(IntLoc, IntLoc+2): - Array2Par[IntLoc][IntIndex] = IntLoc - Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1 - Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc] - IntGlob = 5 - -def Func1(CharPar1, CharPar2): - CharLoc1 = CharPar1 - CharLoc2 = CharLoc1 - if CharLoc2 != CharPar2: - return Ident1 - else: - return Ident2 - -def Func2(StrParI1, StrParI2): - IntLoc = 1 - while IntLoc <= 1: - if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1: - CharLoc = 'A' - IntLoc = IntLoc + 1 - if CharLoc >= 'W' and CharLoc <= 'Z': - IntLoc = 7 - if CharLoc == 'X': - return TRUE - else: - if StrParI1 > StrParI2: - IntLoc = IntLoc + 7 - return TRUE - else: - return FALSE - -def Func3(EnumParIn): - EnumLoc = EnumParIn - if EnumLoc == Ident3: return TRUE - return FALSE - -if __name__ == '__main__': - import sys - def error(msg): - print >>sys.stderr, msg, - print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0] - sys.exit(100) - nargs = len(sys.argv) - 1 - if nargs > 1: - error("%d arguments are too many;" % nargs) - elif nargs == 1: - try: loops = int(sys.argv[1]) - except ValueError: - error("Invalid argument %r;" % sys.argv[1]) - else: - loops = LOOPS - main(loops) - diff --git a/pypy/appspace/random.py b/pypy/appspace/random.py deleted file mode 100644 index b8eead0fa8..0000000000 --- a/pypy/appspace/random.py +++ /dev/null @@ -1,782 +0,0 @@ -# -# This module is stolen from Python 2.2. -# Python 2.3 uses a C extension. -# - -"""Random variable generators. 
- - integers - -------- - uniform within range - - sequences - --------- - pick random element - generate random permutation - - distributions on the real line: - ------------------------------ - uniform - normal (Gaussian) - lognormal - negative exponential - gamma - beta - - distributions on the circle (angles 0 to 2pi) - --------------------------------------------- - circular uniform - von Mises - -Translated from anonymously contributed C/C++ source. - -Multi-threading note: the random number generator used here is not thread- -safe; it is possible that two calls return the same random value. However, -you can instantiate a different instance of Random() in each thread to get -generators that don't share state, then use .setstate() and .jumpahead() to -move the generators to disjoint segments of the full period. For example, - -def create_generators(num, delta, firstseed=None): - ""\"Return list of num distinct generators. - Each generator has its own unique segment of delta elements from - Random.random()'s full period. - Seed the first generator with optional arg firstseed (default is - None, to seed from current time). - ""\" - - from random import Random - g = Random(firstseed) - result = [g] - for i in range(num - 1): - laststate = g.getstate() - g = Random() - g.setstate(laststate) - g.jumpahead(delta) - result.append(g) - return result - -gens = create_generators(10, 1000000) - -That creates 10 distinct generators, which can be passed out to 10 distinct -threads. The generators don't share state so can be called safely in -parallel. So long as no thread calls its g.random() more than a million -times (the second argument to create_generators), the sequences seen by -each thread will not overlap. - -The period of the underlying Wichmann-Hill generator is 6,953,607,871,644, -and that limits how far this technique can be pushed. 
- -Just for fun, note that since we know the period, .jumpahead() can also be -used to "move backward in time": - ->>> g = Random(42) # arbitrary ->>> g.random() -0.25420336316883324 ->>> g.jumpahead(6953607871644L - 1) # move *back* one ->>> g.random() -0.25420336316883324 -""" -# XXX The docstring sucks. - -from math import log as _log, exp as _exp, pi as _pi, e as _e -from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin -from math import floor as _floor - -__all__ = ["Random","seed","random","uniform","randint","choice", - "randrange","shuffle","normalvariate","lognormvariate", - "cunifvariate","expovariate","vonmisesvariate","gammavariate", - "stdgamma","gauss","betavariate","paretovariate","weibullvariate", - "getstate","setstate","jumpahead","whseed"] - -def _verify(name, computed, expected): - if abs(computed - expected) > 1e-7: - raise ValueError( - "computed value for %s deviates too much " - "(computed %g, expected %g)" % (name, computed, expected)) - -NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) -_verify('NV_MAGICCONST', NV_MAGICCONST, 1.71552776992141) - -TWOPI = 2.0*_pi -_verify('TWOPI', TWOPI, 6.28318530718) - -LOG4 = _log(4.0) -_verify('LOG4', LOG4, 1.38629436111989) - -SG_MAGICCONST = 1.0 + _log(4.5) -_verify('SG_MAGICCONST', SG_MAGICCONST, 2.50407739677627) - -del _verify - -# Translated by Guido van Rossum from C source provided by -# Adrian Baddeley. - -class Random: - """Random number generator base class used by bound module functions. - - Used to instantiate instances of Random to get generators that don't - share state. Especially useful for multi-threaded programs, creating - a different instance of Random for each thread, and using the jumpahead() - method to ensure that the generated sequences seen by each thread don't - overlap. 
- - Class Random can also be subclassed if you want to use a different basic - generator of your own devising: in that case, override the following - methods: random(), seed(), getstate(), setstate() and jumpahead(). - - """ - - VERSION = 1 # used by getstate/setstate - - def __init__(self, x=None): - """Initialize an instance. - - Optional argument x controls seeding, as for Random.seed(). - """ - - self.seed(x) - -## -------------------- core generator ------------------- - - # Specific to Wichmann-Hill generator. Subclasses wishing to use a - # different core generator should override the seed(), random(), - # getstate(), setstate() and jumpahead() methods. - - def seed(self, a=None): - """Initialize internal state from hashable object. - - None or no argument seeds from current time. - - If a is not None or an int or long, hash(a) is used instead. - - If a is an int or long, a is used directly. Distinct values between - 0 and 27814431486575L inclusive are guaranteed to yield distinct - internal states (this guarantee is specific to the default - Wichmann-Hill generator). - """ - - if a is None: - # Initialize from current time - import time - a = long(time.time() * 256) - - if type(a) not in (type(3), type(3L)): - a = hash(a) - - a, x = divmod(a, 30268) - a, y = divmod(a, 30306) - a, z = divmod(a, 30322) - self._seed = int(x)+1, int(y)+1, int(z)+1 - - self.gauss_next = None - - def random(self): - """Get the next random number in the range [0.0, 1.0).""" - - # Wichman-Hill random number generator. - # - # Wichmann, B. A. & Hill, I. D. (1982) - # Algorithm AS 183: - # An efficient and portable pseudo-random number generator - # Applied Statistics 31 (1982) 188-190 - # - # see also: - # Correction to Algorithm AS 183 - # Applied Statistics 33 (1984) 123 - # - # McLeod, A. I. 
(1985) - # A remark on Algorithm AS 183 - # Applied Statistics 34 (1985),198-200 - - # This part is thread-unsafe: - # BEGIN CRITICAL SECTION - x, y, z = self._seed - x = (171 * x) % 30269 - y = (172 * y) % 30307 - z = (170 * z) % 30323 - self._seed = x, y, z - # END CRITICAL SECTION - - # Note: on a platform using IEEE-754 double arithmetic, this can - # never return 0.0 (asserted by Tim; proof too long for a comment). - return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 - - def getstate(self): - """Return internal state; can be passed to setstate() later.""" - return self.VERSION, self._seed, self.gauss_next - - def setstate(self, state): - """Restore internal state from object returned by getstate().""" - version = state[0] - if version == 1: - version, self._seed, self.gauss_next = state - else: - raise ValueError("state with version %s passed to " - "Random.setstate() of version %s" % - (version, self.VERSION)) - - def jumpahead(self, n): - """Act as if n calls to random() were made, but quickly. - - n is an int, greater than or equal to 0. - - Example use: If you have 2 threads and know that each will - consume no more than a million random numbers, create two Random - objects r1 and r2, then do - r2.setstate(r1.getstate()) - r2.jumpahead(1000000) - Then r1 and r2 will use guaranteed-disjoint segments of the full - period. - """ - - if not n >= 0: - raise ValueError("n must be >= 0") - x, y, z = self._seed - x = int(x * pow(171, n, 30269)) % 30269 - y = int(y * pow(172, n, 30307)) % 30307 - z = int(z * pow(170, n, 30323)) % 30323 - self._seed = x, y, z - - def __whseed(self, x=0, y=0, z=0): - """Set the Wichmann-Hill seed from (x, y, z). - - These must be integers in the range [0, 256). 
- """ - - if not type(x) == type(y) == type(z) == type(0): - raise TypeError('seeds must be integers') - if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): - raise ValueError('seeds must be in range(0, 256)') - if 0 == x == y == z: - # Initialize from current time - import time - t = long(time.time() * 256) - t = int((t&0xffffff) ^ (t>>24)) - t, x = divmod(t, 256) - t, y = divmod(t, 256) - t, z = divmod(t, 256) - # Zero is a poor seed, so substitute 1 - self._seed = (x or 1, y or 1, z or 1) - - self.gauss_next = None - - def whseed(self, a=None): - """Seed from hashable object's hash code. - - None or no argument seeds from current time. It is not guaranteed - that objects with distinct hash codes lead to distinct internal - states. - - This is obsolete, provided for compatibility with the seed routine - used prior to Python 2.1. Use the .seed() method instead. - """ - - if a is None: - self.__whseed() - return - a = hash(a) - a, x = divmod(a, 256) - a, y = divmod(a, 256) - a, z = divmod(a, 256) - x = (x + a) % 256 or 1 - y = (y + a) % 256 or 1 - z = (z + a) % 256 or 1 - self.__whseed(x, y, z) - -## ---- Methods below this point do not need to be overridden when -## ---- subclassing for the purpose of using a different core generator. - -## -------------------- pickle support ------------------- - - def __getstate__(self): # for pickle - return self.getstate() - - def __setstate__(self, state): # for pickle - self.setstate(state) - -## -------------------- integer methods ------------------- - - def randrange(self, start, stop=None, step=1, int=int, default=None): - """Choose a random item from range(start, stop[, step]). - - This fixes the problem with randint() which includes the - endpoint; in Python this is usually not what you want. - Do not supply the 'int' and 'default' arguments. - """ - - # This code is a bit messy to make it fast for the - # common case while still doing adequate error checking. 
- istart = int(start) - if istart != start: - raise ValueError, "non-integer arg 1 for randrange()" - if stop is default: - if istart > 0: - return int(self.random() * istart) - raise ValueError, "empty range for randrange()" - - # stop argument supplied. - istop = int(stop) - if istop != stop: - raise ValueError, "non-integer stop for randrange()" - if step == 1 and istart < istop: - try: - return istart + int(self.random()*(istop - istart)) - except OverflowError: - # This can happen if istop-istart > sys.maxint + 1, and - # multiplying by random() doesn't reduce it to something - # <= sys.maxint. We know that the overall result fits - # in an int, and can still do it correctly via math.floor(). - # But that adds another function call, so for speed we - # avoided that whenever possible. - return int(istart + _floor(self.random()*(istop - istart))) - if step == 1: - raise ValueError, "empty range for randrange()" - - # Non-unit step argument supplied. - istep = int(step) - if istep != step: - raise ValueError, "non-integer step for randrange()" - if istep > 0: - n = (istop - istart + istep - 1) / istep - elif istep < 0: - n = (istop - istart + istep + 1) / istep - else: - raise ValueError, "zero step for randrange()" - - if n <= 0: - raise ValueError, "empty range for randrange()" - return istart + istep*int(self.random() * n) - - def randint(self, a, b): - """Return random integer in range [a, b], including both end points. - """ - - return self.randrange(a, b+1) - -## -------------------- sequence methods ------------------- - - def choice(self, seq): - """Choose a random element from a non-empty sequence.""" - return seq[int(self.random() * len(seq))] - - def shuffle(self, x, random=None, int=int): - """x, random=random.random -> shuffle list x in place; return None. - - Optional arg random is a 0-argument function returning a random - float in [0.0, 1.0); by default, the standard random.random. 
- - Note that for even rather small len(x), the total number of - permutations of x is larger than the period of most random number - generators; this implies that "most" permutations of a long - sequence can never be generated. - """ - - if random is None: - random = self.random - for i in xrange(len(x)-1, 0, -1): - # pick an element in x[:i+1] with which to exchange x[i] - j = int(random() * (i+1)) - x[i], x[j] = x[j], x[i] - -## -------------------- real-valued distributions ------------------- - -## -------------------- uniform distribution ------------------- - - def uniform(self, a, b): - """Get a random number in the range [a, b).""" - return a + (b-a) * self.random() - -## -------------------- normal distribution -------------------- - - def normalvariate(self, mu, sigma): - """Normal distribution. - - mu is the mean, and sigma is the standard deviation. - - """ - # mu = mean, sigma = standard deviation - - # Uses Kinderman and Monahan method. Reference: Kinderman, - # A.J. and Monahan, J.F., "Computer generation of random - # variables using the ratio of uniform deviates", ACM Trans - # Math Software, 3, (1977), pp257-260. - - random = self.random - while 1: - u1 = random() - u2 = random() - z = NV_MAGICCONST*(u1-0.5)/u2 - zz = z*z/4.0 - if zz <= -_log(u2): - break - return mu + z*sigma - -## -------------------- lognormal distribution -------------------- - - def lognormvariate(self, mu, sigma): - """Log normal distribution. - - If you take the natural logarithm of this distribution, you'll get a - normal distribution with mean mu and standard deviation sigma. - mu can have any value, and sigma must be greater than zero. - - """ - return _exp(self.normalvariate(mu, sigma)) - -## -------------------- circular uniform -------------------- - - def cunifvariate(self, mean, arc): - """Circular uniform distribution. - - mean is the mean angle, and arc is the range of the distribution, - centered around the mean angle. Both values must be expressed in - radians. 
Returned values range between mean - arc/2 and - mean + arc/2 and are normalized to between 0 and pi. - - Deprecated in version 2.3. Use: - (mean + arc * (Random.random() - 0.5)) % Math.pi - - """ - # mean: mean angle (in radians between 0 and pi) - # arc: range of distribution (in radians between 0 and pi) - - return (mean + arc * (self.random() - 0.5)) % _pi - -## -------------------- exponential distribution -------------------- - - def expovariate(self, lambd): - """Exponential distribution. - - lambd is 1.0 divided by the desired mean. (The parameter would be - called "lambda", but that is a reserved word in Python.) Returned - values range from 0 to positive infinity. - - """ - # lambd: rate lambd = 1/mean - # ('lambda' is a Python reserved word) - - random = self.random - u = random() - while u <= 1e-7: - u = random() - return -_log(u)/lambd - -## -------------------- von Mises distribution -------------------- - - def vonmisesvariate(self, mu, kappa): - """Circular data distribution. - - mu is the mean angle, expressed in radians between 0 and 2*pi, and - kappa is the concentration parameter, which must be greater than or - equal to zero. If kappa is equal to zero, this distribution reduces - to a uniform random angle over the range 0 to 2*pi. - - """ - # mu: mean angle (in radians between 0 and 2*pi) - # kappa: concentration parameter kappa (>= 0) - # if kappa = 0 generate uniform random angle - - # Based upon an algorithm published in: Fisher, N.I., - # "Statistical Analysis of Circular Data", Cambridge - # University Press, 1993. - - # Thanks to Magnus Kessler for a correction to the - # implementation of step 4. 
- - random = self.random - if kappa <= 1e-6: - return TWOPI * random() - - a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) - b = (a - _sqrt(2.0 * a))/(2.0 * kappa) - r = (1.0 + b * b)/(2.0 * b) - - while 1: - u1 = random() - - z = _cos(_pi * u1) - f = (1.0 + r * z)/(r + z) - c = kappa * (r - f) - - u2 = random() - - if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)): - break - - u3 = random() - if u3 > 0.5: - theta = (mu % TWOPI) + _acos(f) - else: - theta = (mu % TWOPI) - _acos(f) - - return theta - -## -------------------- gamma distribution -------------------- - - def gammavariate(self, alpha, beta): - """Gamma distribution. Not the gamma function! - - Conditions on the parameters are alpha > 0 and beta > 0. - - """ - - # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 - - # Warning: a few older sources define the gamma distribution in terms - # of alpha > -1.0 - if alpha <= 0.0 or beta <= 0.0: - raise ValueError, 'gammavariate: alpha and beta must be > 0.0' - - random = self.random - if alpha > 1.0: - - # Uses R.C.H. Cheng, "The generation of Gamma - # variables with non-integral shape parameters", - # Applied Statistics, (1977), 26, No. 
1, p71-74 - - ainv = _sqrt(2.0 * alpha - 1.0) - bbb = alpha - LOG4 - ccc = alpha + ainv - - while 1: - u1 = random() - u2 = random() - v = _log(u1/(1.0-u1))/ainv - x = alpha*_exp(v) - z = u1*u1*u2 - r = bbb+ccc*v-x - if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): - return x * beta - - elif alpha == 1.0: - # expovariate(1) - u = random() - while u <= 1e-7: - u = random() - return -_log(u) * beta - - else: # alpha is between 0 and 1 (exclusive) - - # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle - - while 1: - u = random() - b = (_e + alpha)/_e - p = b*u - if p <= 1.0: - x = pow(p, 1.0/alpha) - else: - # p > 1 - x = -_log((b-p)/alpha) - u1 = random() - if not (((p <= 1.0) and (u1 > _exp(-x))) or - ((p > 1) and (u1 > pow(x, alpha - 1.0)))): - break - return x * beta - - - def stdgamma(self, alpha, ainv, bbb, ccc): - # This method was (and shall remain) undocumented. - # This method is deprecated - # for the following reasons: - # 1. Returns same as .gammavariate(alpha, 1.0) - # 2. Requires caller to provide 3 extra arguments - # that are functions of alpha anyway - # 3. Can't be used for alpha < 0.5 - - # ainv = sqrt(2 * alpha - 1) - # bbb = alpha - log(4) - # ccc = alpha + ainv - import warnings - warnings.warn("The stdgamma function is deprecated; " - "use gammavariate() instead", - DeprecationWarning) - return self.gammavariate(alpha, 1.0) - - - -## -------------------- Gauss (faster alternative) -------------------- - - def gauss(self, mu, sigma): - """Gaussian distribution. - - mu is the mean, and sigma is the standard deviation. This is - slightly faster than the normalvariate() function. - - Not thread-safe without a lock around calls. - - """ - - # When x and y are two variables from [0, 1), uniformly - # distributed, then - # - # cos(2*pi*x)*sqrt(-2*log(1-y)) - # sin(2*pi*x)*sqrt(-2*log(1-y)) - # - # are two *independent* variables with normal distribution - # (mu = 0, sigma = 1). 
- # (Lambert Meertens) - # (corrected version; bug discovered by Mike Miller, fixed by LM) - - # Multithreading note: When two threads call this function - # simultaneously, it is possible that they will receive the - # same return value. The window is very small though. To - # avoid this, you have to use a lock around all calls. (I - # didn't want to slow this down in the serial case by using a - # lock here.) - - random = self.random - z = self.gauss_next - self.gauss_next = None - if z is None: - x2pi = random() * TWOPI - g2rad = _sqrt(-2.0 * _log(1.0 - random())) - z = _cos(x2pi) * g2rad - self.gauss_next = _sin(x2pi) * g2rad - - return mu + z*sigma - -## -------------------- beta -------------------- -## See -## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470 -## for Ivan Frohne's insightful analysis of why the original implementation: -## -## def betavariate(self, alpha, beta): -## # Discrete Event Simulation in C, pp 87-88. -## -## y = self.expovariate(alpha) -## z = self.expovariate(1.0/beta) -## return z/(y+z) -## -## was dead wrong, and how it probably got that way. - - def betavariate(self, alpha, beta): - """Beta distribution. - - Conditions on the parameters are alpha > -1 and beta} > -1. - Returned values range between 0 and 1. - - """ - - # This version due to Janne Sinkkonen, and matches all the std - # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). - y = self.gammavariate(alpha, 1.) - if y == 0: - return 0.0 - else: - return y / (y + self.gammavariate(beta, 1.)) - -## -------------------- Pareto -------------------- - - def paretovariate(self, alpha): - """Pareto distribution. alpha is the shape parameter.""" - # Jain, pg. 495 - - u = self.random() - return 1.0 / pow(u, 1.0/alpha) - -## -------------------- Weibull -------------------- - - def weibullvariate(self, alpha, beta): - """Weibull distribution. - - alpha is the scale parameter and beta is the shape parameter. - - """ - # Jain, pg. 
499; bug fix courtesy Bill Arms - - u = self.random() - return alpha * pow(-_log(u), 1.0/beta) - -## -------------------- test program -------------------- - -def _test_generator(n, funccall): - import time - print n, 'times', funccall - code = compile(funccall, funccall, 'eval') - sum = 0.0 - sqsum = 0.0 - smallest = 1e10 - largest = -1e10 - t0 = time.time() - for i in range(n): - x = eval(code) - sum = sum + x - sqsum = sqsum + x*x - smallest = min(x, smallest) - largest = max(x, largest) - t1 = time.time() - print round(t1-t0, 3), 'sec,', - avg = sum/n - stddev = _sqrt(sqsum/n - avg*avg) - print 'avg %g, stddev %g, min %g, max %g' % \ - (avg, stddev, smallest, largest) - -def _test(N=20000): - print 'TWOPI =', TWOPI - print 'LOG4 =', LOG4 - print 'NV_MAGICCONST =', NV_MAGICCONST - print 'SG_MAGICCONST =', SG_MAGICCONST - _test_generator(N, 'random()') - _test_generator(N, 'normalvariate(0.0, 1.0)') - _test_generator(N, 'lognormvariate(0.0, 1.0)') - _test_generator(N, 'cunifvariate(0.0, 1.0)') - _test_generator(N, 'expovariate(1.0)') - _test_generator(N, 'vonmisesvariate(0.0, 1.0)') - _test_generator(N, 'gammavariate(0.01, 1.0)') - _test_generator(N, 'gammavariate(0.1, 1.0)') - _test_generator(N, 'gammavariate(0.1, 2.0)') - _test_generator(N, 'gammavariate(0.5, 1.0)') - _test_generator(N, 'gammavariate(0.9, 1.0)') - _test_generator(N, 'gammavariate(1.0, 1.0)') - _test_generator(N, 'gammavariate(2.0, 1.0)') - _test_generator(N, 'gammavariate(20.0, 1.0)') - _test_generator(N, 'gammavariate(200.0, 1.0)') - _test_generator(N, 'gauss(0.0, 1.0)') - _test_generator(N, 'betavariate(3.0, 3.0)') - _test_generator(N, 'paretovariate(1.0)') - _test_generator(N, 'weibullvariate(1.0, 1.0)') - - # Test jumpahead. 
- s = getstate() - jumpahead(N) - r1 = random() - # now do it the slow way - setstate(s) - for i in range(N): - random() - r2 = random() - if r1 != r2: - raise ValueError("jumpahead test failed " + `(N, r1, r2)`) - -# Create one instance, seeded from current time, and export its methods -# as module-level functions. The functions are not threadsafe, and state -# is shared across all uses (both in the user's code and in the Python -# libraries), but that's fine for most programs and is easier for the -# casual user than making them instantiate their own Random() instance. -_inst = Random() -seed = _inst.seed -random = _inst.random -uniform = _inst.uniform -randint = _inst.randint -choice = _inst.choice -randrange = _inst.randrange -shuffle = _inst.shuffle -normalvariate = _inst.normalvariate -lognormvariate = _inst.lognormvariate -cunifvariate = _inst.cunifvariate -expovariate = _inst.expovariate -vonmisesvariate = _inst.vonmisesvariate -gammavariate = _inst.gammavariate -stdgamma = _inst.stdgamma -gauss = _inst.gauss -betavariate = _inst.betavariate -paretovariate = _inst.paretovariate -weibullvariate = _inst.weibullvariate -getstate = _inst.getstate -setstate = _inst.setstate -jumpahead = _inst.jumpahead -whseed = _inst.whseed - -if __name__ == '__main__': - _test() diff --git a/pypy/appspace/sha.py b/pypy/appspace/sha.py deleted file mode 100644 index 3738650a01..0000000000 --- a/pypy/appspace/sha.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/env python -# -*- coding: iso-8859-1 - -"""A sample implementation of SHA-1 in pure Python. - - Framework adapted from Dinu Gherman's MD5 implementation by - J. Hallén and L. Creighton. SHA-1 implementation based directly on - the text of the NIST standard FIPS PUB 180-1. -""" - - -__date__ = '2004-11-17' -__version__ = 0.91 # Modernised by J. Hallén and L. 
Creighton for Pypy - - -import struct, copy - - -# ====================================================================== -# Bit-Manipulation helpers -# -# _long2bytes() was contributed by Barry Warsaw -# and is reused here with tiny modifications. -# ====================================================================== - -def _long2bytesBigEndian(n, blocksize=0): - """Convert a long integer to a byte string. - - If optional blocksize is given and greater than zero, pad the front - of the byte string with binary zeros so that the length is a multiple - of blocksize. - """ - - # After much testing, this algorithm was deemed to be the fastest. - s = '' - pack = struct.pack - while n > 0: - s = pack('>I', n & 0xffffffffL) + s - n = n >> 32 - - # Strip off leading zeros. - for i in range(len(s)): - if s[i] <> '\000': - break - else: - # Only happens when n == 0. - s = '\000' - i = 0 - - s = s[i:] - - # Add back some pad bytes. This could be done more efficiently - # w.r.t. the de-padding being done above, but sigh... - if blocksize > 0 and len(s) % blocksize: - s = (blocksize - len(s) % blocksize) * '\000' + s - - return s - - -def _bytelist2longBigEndian(list): - "Transform a list of characters into a list of longs." - - imax = len(list)/4 - hl = [0L] * imax - - j = 0 - i = 0 - while i < imax: - b0 = long(ord(list[j])) << 24 - b1 = long(ord(list[j+1])) << 16 - b2 = long(ord(list[j+2])) << 8 - b3 = long(ord(list[j+3])) - hl[i] = b0 | b1 | b2 | b3 - i = i+1 - j = j+4 - - return hl - - -def _rotateLeft(x, n): - "Rotate x (32 bit) left n bits circularly." 
- - return (x << n) | (x >> (32-n)) - - -# ====================================================================== -# The SHA transformation functions -# -# ====================================================================== - -def f0_19(B, C, D): - return (B & C) | ((~ B) & D) - -def f20_39(B, C, D): - return B ^ C ^ D - -def f40_59(B, C, D): - return (B & C) | (B & D) | (C & D) - -def f60_79(B, C, D): - return B ^ C ^ D - - -f = [f0_19, f20_39, f40_59, f60_79] - -# Constants to be used -K = [ - 0x5A827999L, # ( 0 <= t <= 19) - 0x6ED9EBA1L, # (20 <= t <= 39) - 0x8F1BBCDCL, # (40 <= t <= 59) - 0xCA62C1D6L # (60 <= t <= 79) - ] - -class MD5: - "An implementation of the MD5 hash function in pure Python." - - def __init__(self): - "Initialisation." - - # Initial message length in bits(!). - self.length = 0L - self.count = [0, 0] - - # Initial empty message as a sequence of bytes (8 bit characters). - self.input = [] - - # Call a separate init function, that can be used repeatedly - # to start from scratch on the same object. - self.init() - - - def init(self): - "Initialize the message-digest and set all fields to zero." - - self.length = 0L - self.input = [] - - # Initial 160 bit message digest (5 times 32 bit). 
- self.H0 = 0x67452301L - self.H1 = 0xEFCDAB89L - self.H2 = 0x98BADCFEL - self.H3 = 0x10325476L - self.H4 = 0xC3D2E1F0L - - def _transform(self, W): - - for t in range(16, 80): - W.append(_rotateLeft( - W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffffL) - - A = self.H0 - B = self.H1 - C = self.H2 - D = self.H3 - E = self.H4 - - """ - This loop was unrolled to gain about 10% in speed - for t in range(0, 80): - TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - """ - - for t in range(0, 20): - TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(20, 40): - TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(40, 60): - TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - for t in range(60, 80): - TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3] - E = D - D = C - C = _rotateLeft(B, 30) & 0xffffffffL - B = A - A = TEMP & 0xffffffffL - - - self.H0 = (self.H0 + A) & 0xffffffffL - self.H1 = (self.H1 + B) & 0xffffffffL - self.H2 = (self.H2 + C) & 0xffffffffL - self.H3 = (self.H3 + D) & 0xffffffffL - self.H4 = (self.H4 + E) & 0xffffffffL - - - # Down from here all methods follow the Python Standard Library - # API of the sha module. - - def update(self, inBuf): - """Add to the current message. - - Update the md5 object with the string arg. Repeated calls - are equivalent to a single call with the concatenation of all - the arguments, i.e. m.update(a); m.update(b) is equivalent - to m.update(a+b). - - The hash is immediately calculated for all full blocks. The final - calculation is made in digest(). 
It will calculate 1-2 blocks, - depending on how much padding we have to add. This allows us to - keep an intermediate value for the hash, so that we only need to - make minimal recalculation if we call update() to add more data - to the hashed string. - """ - - leninBuf = long(len(inBuf)) - - # Compute number of bytes mod 64. - index = (self.count[1] >> 3) & 0x3FL - - # Update number of bits. - self.count[1] = self.count[1] + (leninBuf << 3) - if self.count[1] < (leninBuf << 3): - self.count[0] = self.count[0] + 1 - self.count[0] = self.count[0] + (leninBuf >> 29) - - partLen = 64 - index - - if leninBuf >= partLen: - self.input[index:] = list(inBuf[:partLen]) - self._transform(_bytelist2longBigEndian(self.input)) - i = partLen - while i + 63 < leninBuf: - self._transform(_bytelist2longBigEndian(list(inBuf[i:i+64]))) - i = i + 64 - else: - self.input = list(inBuf[i:leninBuf]) - else: - i = 0 - self.input = self.input + list(inBuf) - - - def digest(self): - """Terminate the message-digest computation and return digest. - - Return the digest of the strings passed to the update() - method so far. This is a 16-byte string which may contain - non-ASCII characters, including null bytes. - """ - - H0 = self.H0 - H1 = self.H1 - H2 = self.H2 - H3 = self.H3 - H4 = self.H4 - input = [] + self.input - count = [] + self.count - - index = (self.count[1] >> 3) & 0x3fL - - if index < 56: - padLen = 56 - index - else: - padLen = 120 - index - - padding = ['\200'] + ['\000'] * 63 - self.update(padding[:padLen]) - - # Append length (before padding). - bits = _bytelist2longBigEndian(self.input[:56]) + count - - self._transform(bits) - - # Store state in digest. 
- digest = _long2bytesBigEndian(self.H0, 4) + \ - _long2bytesBigEndian(self.H1, 4) + \ - _long2bytesBigEndian(self.H2, 4) + \ - _long2bytesBigEndian(self.H3, 4) + \ - _long2bytesBigEndian(self.H4, 4) - - self.H0 = H0 - self.H1 = H1 - self.H2 = H2 - self.H3 = H3 - self.H4 = H4 - self.input = input - self.count = count - - return digest - - - def hexdigest(self): - """Terminate and return digest in HEX form. - - Like digest() except the digest is returned as a string of - length 32, containing only hexadecimal digits. This may be - used to exchange the value safely in email or other non- - binary environments. - """ - return ''.join(['%02x' % ord(c) for c in self.digest()]) - - def copy(self): - """Return a clone object. - - Return a copy ('clone') of the md5 object. This can be used - to efficiently compute the digests of strings that share - a common initial substring. - """ - - return copy.deepcopy(self) - - -# ====================================================================== -# Mimic Python top-level functions from standard library API -# for consistency with the md5 module of the standard library. -# ====================================================================== - -digest_size = 16 - -def new(arg=None): - """Return a new md5 object. - - If arg is present, the method call update(arg) is made. - """ - - md5 = MD5() - if arg: - md5.update(arg) - - return md5 diff --git a/pypy/appspace/sio.py b/pypy/appspace/sio.py deleted file mode 100644 index 353fe9111e..0000000000 --- a/pypy/appspace/sio.py +++ /dev/null @@ -1,867 +0,0 @@ -"""New standard I/O library. - -This code is still very young and experimental! - -There are fairly complete unit tests in test_sio.py. - -The design is simple: - -- A raw stream supports read(n), write(s), seek(offset, whence=0) and - tell(). This is generally unbuffered. Raw streams may support - Unicode. - -- A basis stream provides the raw stream API and builds on a much more - low-level API, e.g. 
the os, mmap or socket modules. - -- A filtering stream is raw stream built on top of another raw stream. - There are filtering streams for universal newline translation and - for unicode translation. - -- A buffering stream supports the full classic Python I/O API: - read(n=-1), readline(), readlines(sizehint=0), tell(), seek(offset, - whence=0), write(s), writelines(lst), as well as __iter__() and - next(). (There's also readall() but that's a synonym for read() - without arguments.) This is a superset of the raw stream API. I - haven't thought about fileno() and isatty() yet, nor about - truncate() or the various attributes like name and mode. Also, - close() is not implemented right. We really need only one buffering - stream implementation, which is a filtering stream. - -You typically take a basis stream, place zero or more filtering -streams on top of it, and then top it off with a buffering stream. - -""" - -import os -import mmap - -class Stream(object): - "All streams except the base ones need to inherit from this class." - def __getattr__(self, name): - """ - Delegate all other methods to the underlying file object. - """ - return getattr(self.base, name) - -class BufferingInputStream(Stream): - - """Standard buffering input stream. - - This is typically the top of the stack. 
- """ - - bigsize = 2**19 # Half a Meg - bufsize = 2**13 # 8 K - - def __init__(self, base, bufsize=None): - self.do_read = getattr(base, "read", None) - # function to fill buffer some more - self.do_tell = getattr(base, "tell", None) - # None, or return a byte offset - self.do_seek = getattr(base, "seek", None) - # None, or seek to a byte offset - self.close = base.close - - if bufsize is None: # Get default from the class - bufsize = self.bufsize - self.bufsize = bufsize # buffer size (hint only) - self.lines = [] # ready-made lines (sans "\n") - self.buf = "" # raw data (may contain "\n") - # Invariant: readahead == "\n".join(self.lines + [self.buf]) - # self.lines contains no "\n" - # self.buf may contain "\n" - - def tell(self): - bytes = self.do_tell() # This may fail - offset = len(self.buf) - for line in self.lines: - offset += len(line) + 1 - assert bytes >= offset, (locals(), self.__dict__) - return bytes - offset - - def seek(self, offset, whence=0): - # This may fail on the do_seek() or do_tell() call. - # But it won't call either on a relative forward seek. - # Nor on a seek to the very end. 
- if whence == 0 or (whence == 2 and self.do_seek is not None): - self.do_seek(offset, whence) - self.lines = [] - self.buf = "" - return - if whence == 2: - # Skip relative to EOF by reading and saving only just as - # much as needed - assert self.do_seek is None - data = "\n".join(self.lines + [self.buf]) - total = len(data) - buffers = [data] - self.lines = [] - self.buf = "" - while 1: - data = self.do_read(self.bufsize) - if not data: - break - buffers.append(data) - total += len(data) - while buffers and total >= len(buffers[0]) - offset: - total -= len(buffers[0]) - del buffers[0] - cutoff = total + offset - if cutoff < 0: - raise TypeError, "cannot seek back" - if buffers: - buffers[0] = buffers[0][cutoff:] - self.buf = "".join(buffers) - self.lines = [] - return - if whence == 1: - if offset < 0: - self.do_seek(self.tell() + offset, 0) - self.lines = [] - self.buf = "" - return - while self.lines: - line = self.lines[0] - if offset <= len(line): - self.lines[0] = line[offset:] - return - offset -= len(self.lines[0]) - 1 - del self.lines[0] - assert not self.lines - if offset <= len(self.buf): - self.buf = self.buf[offset:] - return - offset -= len(self.buf) - self.buf = "" - if self.do_seek is None: - self.read(offset) - else: - self.do_seek(offset, 1) - return - raise ValueError, "whence should be 0, 1 or 2" - - def readall(self): - self.lines.append(self.buf) - more = ["\n".join(self.lines)] - self.lines = [] - self.buf = "" - bufsize = self.bufsize - while 1: - data = self.do_read(bufsize) - if not data: - break - more.append(data) - bufsize = max(bufsize*2, self.bigsize) - return "".join(more) - - def read(self, n=-1): - if n < 0: - return self.readall() - - if self.lines: - # See if this can be satisfied from self.lines[0] - line = self.lines[0] - if len(line) >= n: - self.lines[0] = line[n:] - return line[:n] - - # See if this can be satisfied *without exhausting* self.lines - k = 0 - i = 0 - for line in self.lines: - k += len(line) - if k >= n: - 
lines = self.lines[:i] - data = self.lines[i] - cutoff = len(data) - (k-n) - lines.append(data[:cutoff]) - self.lines[:i+1] = [data[cutoff:]] - return "\n".join(lines) - k += 1 - i += 1 - - # See if this can be satisfied from self.lines plus self.buf - if k + len(self.buf) >= n: - lines = self.lines - self.lines = [] - cutoff = n - k - lines.append(self.buf[:cutoff]) - self.buf = self.buf[cutoff:] - return "\n".join(lines) - - else: - # See if this can be satisfied from self.buf - data = self.buf - k = len(data) - if k >= n: - cutoff = len(data) - (k-n) - self.buf = data[cutoff:] - return data[:cutoff] - - lines = self.lines - self.lines = [] - lines.append(self.buf) - self.buf = "" - data = "\n".join(lines) - more = [data] - k = len(data) - while k < n: - data = self.do_read(max(self.bufsize, n-k)) - k += len(data) - more.append(data) - if not data: - break - cutoff = len(data) - (k-n) - self.buf = data[cutoff:] - more[-1] = data[:cutoff] - return "".join(more) - - def __iter__(self): - return self - - def next(self): - if self.lines: - return self.lines.pop(0) + "\n" - - # This block is needed because read() can leave self.buf - # containing newlines - self.lines = self.buf.split("\n") - self.buf = self.lines.pop() - if self.lines: - return self.lines.pop(0) + "\n" - - buf = self.buf and [self.buf] or [] - while 1: - self.buf = self.do_read(self.bufsize) - self.lines = self.buf.split("\n") - self.buf = self.lines.pop() - if self.lines: - buf.append(self.lines.pop(0)) - buf.append("\n") - break - if not self.buf: - break - buf.append(self.buf) - - line = "".join(buf) - if not line: - raise StopIteration - return line - - def readline(self): - try: - return self.next() - except StopIteration: - return "" - - def readlines(self, sizehint=0): - return list(self) - -class BufferingOutputStream(Stream): - - """Standard buffering output stream. - - This is typically the top of the stack. 
- """ - - bigsize = 2**19 # Half a Meg - bufsize = 2**13 # 8 K - - def __init__(self, base, bufsize=None): - self.do_write = base.write # Flush buffer - self.do_tell = base.tell - # Return a byte offset; has to exist or this __init__() will fail - self.do_seek = getattr(base, "seek", None) - # None, or seek to a byte offset - self.do_close = base.close # Close file - self.do_truncate = base.truncate # Truncate file - - if bufsize is None: # Get default from the class - bufsize = self.bufsize - self.bufsize = bufsize # buffer size (hint only) - self.buf = "" - self.tell() - - def tell(self): - assert self.do_tell is not None - if not hasattr(self, 'pos'): - self.pos = self.do_tell() - - return self.pos - - def seek(self, offset, whence=0): - self.flush() - self.do_seek(offset, whence) - self.pos = self.do_tell() - - def flush(self): - self.do_write(self.buf) - self.buf = '' - - def write(self, data): - buflen = len(self.buf) - datalen = len(data) - if datalen + buflen < self.bufsize: - self.buf += data - self.pos += datalen - else: - self.buf += data[:self.bufsize-buflen] - self.pos += self.bufsize-buflen - self.do_write(self.buf) - self.buf = '' - self.write(data[self.bufsize-buflen:]) - - def writelines(self, sequence): - for s in sequence: - self.write(s) - - def close(self): - self.do_write(self.buf) - self.buf = '' - if self.do_close(): - self.do_close() - - def truncate(self, size=None): - self.flush() - self.do_truncate(size) - -class LineBufferingOutputStream(BufferingOutputStream): - - """Line buffering output stream. - - This is typically the top of the stack. 
- """ - - def __init__(self, base, bufsize=None): - self.do_write = base.write # Flush buffer - self.do_tell = base.tell - # Return a byte offset; has to exist or this __init__() will fail - self.do_seek = getattr(base, "seek", None) - # None, or seek to a byte offset - self.do_close = base.close # Close file - self.do_truncate = base.truncate # Truncate file - - self.linesep = os.linesep - self.buf = "" # raw data (may contain "\n") - self.tell() - - def write(self, data): - all_lines = data.split(self.linesep) - full_lines = all_lines[:-1] - for line in full_lines: - line += self.linesep - buflen = len(self.buf) - linelen = len(line) - if linelen + buflen < self.bufsize: - self.buf += line - self.pos += linelen - self.do_write(self.buf) - self.buf = '' - else: - self.buf += line[:self.bufsize-buflen] - self.pos += self.bufsize-buflen - self.do_write(self.buf) - self.buf = '' - self.write(line[self.bufsize-buflen:]) - - # The last part of the split data never has a terminating linesep. - # If the data has a terminating linesep, the last element is an - # empty string. - - line = all_lines[-1] - buflen = len(self.buf) - linelen = len(line) - if linelen + buflen < self.bufsize: - self.buf += line - self.pos += linelen - else: - self.buf += line[:self.bufsize-buflen] - self.pos += self.bufsize-buflen - self.do_write(self.buf) - self.buf = '' - self.write(line[self.bufsize-buflen:]) - -class BufferingInputOutputStream(Stream): - """To handle buffered input and output at the same time, we are - switching back and forth between using BuffereingInputStream - and BufferingOutputStream as reads and writes are done. - A more optimal solution would be to read and write on the same - buffer, but it would take a fair bit of time to implement. 
- """ - - def __init__(self, base, bufsize=None): - self.base = base - self.bufsize = bufsize - self.reader = None - self.writer = None - - def read(self, n=-1): - if not self.reader: - if self.writer: - self.writer.flush() - self.writer = None - self.reader = BufferingInputStream(self.base, self.bufsize) - return self.reader.read(n) - - def write(self, data): - if not self.writer: - if self.reader: - # Make sure the underlying file has the correct current - # position - self.reader.seek(self.reader.tell()) - self.reader = None - self.writer = BufferingOutputStream(self.base, self.bufsize) - return self.writer.write(data) - - def truncate(self, size=None): - if not self.writer: - if self.reader: - # Make sure the underlying file has the correct current - # position - self.reader.seek(self.reader.tell()) - self.reader = None - self.writer = BufferingOutputStream(self.base, self.bufsize) - return self.writer.truncate(size) - - def __getattr__(self, name): - """ - Delegate all other methods to the underlying file object. - """ - if not self.reader and not self.writer: - self.reader = BufferingInputStream(self.base, self.bufsize) - - if self.reader: - return getattr(self.reader, name) - - return getattr(self.writer, name) - -class CRLFFilter(Stream): - - """Filtering stream for universal newlines. - - TextInputFilter is more general, but this is faster when you don't - need tell/seek. 
- """ - - def __init__(self, base): - self.do_read = base.read - self.atcr = False - self.close = base.close - - def read(self, n): - data = self.do_read(n) - if self.atcr: - if data.startswith("\n"): - data = data[1:] # Very rare case: in the middle of "\r\n" - self.atcr = False - if "\r" in data: - self.atcr = data.endswith("\r") # Test this before removing \r - data = data.replace("\r\n", "\n") # Catch \r\n this first - data = data.replace("\r", "\n") # Remaining \r are standalone - return data - -class MMapFile(object): - - """Standard I/O basis stream using mmap.""" - - def __init__(self, filename, mode="r"): - self.filename = filename - self.mode = mode - if mode == "r": - flag = os.O_RDONLY - self.access = mmap.ACCESS_READ - else: - if mode == "w": - flag = os.O_RDWR | os.O_CREAT - elif mode == "a": - flag = os.O_RDWR - else: - raise ValueError, "mode should be 'r', 'w' or 'a'" - self.access = mmap.ACCESS_WRITE - if hasattr(os, "O_BINARY"): - flag |= os.O_BINARY - self.fd = os.open(filename, flag) - size = os.fstat(self.fd).st_size - self.mm = mmap.mmap(self.fd, size, access=self.access) - self.pos = 0 - - def __del__(self): - self.close() - - mm = fd = None - - def close(self): - if self.mm is not None: - self.mm.close() - self.mm = None - if self.fd is not None: - os.close(self.fd) - self.fd = None - - def tell(self): - return self.pos - - def seek(self, offset, whence=0): - if whence == 0: - self.pos = max(0, offset) - elif whence == 1: - self.pos = max(0, self.pos + offset) - elif whence == 2: - self.pos = max(0, self.mm.size() + offset) - else: - raise ValueError, "seek(): whence must be 0, 1 or 2" - - def readall(self): - return self.read() - - def read(self, n=-1): - if n >= 0: - aim = self.pos + n - else: - aim = self.mm.size() # Actual file size, may be more than mapped - n = aim - self.pos - data = self.mm[self.pos:aim] - if len(data) < n: - del data - # File grew since opened; remap to get the new data - size = os.fstat(self.fd).st_size - self.mm 
= mmap.mmap(self.fd, size, access=self.access) - data = self.mm[self.pos:aim] - self.pos += len(data) - return data - - def __iter__(self): - return self - - def readline(self): - hit = self.mm.find("\n", self.pos) + 1 - if hit: - data = self.mm[self.pos:hit] - self.pos = hit - return data - # Remap the file just in case - size = os.fstat(self.fd).st_size - self.mm = mmap.mmap(self.fd, size, access=self.access) - hit = self.mm.find("\n", self.pos) + 1 - if hit: - # Got a whole line after remapping - data = self.mm[self.pos:hit] - self.pos = hit - return data - # Read whatever we've got -- may be empty - data = self.mm[self.pos:self.mm.size()] - self.pos += len(data) - return data - - def next(self): - hit = self.mm.find("\n", self.pos) + 1 - if hit: - data = self.mm[self.pos:hit] - self.pos = hit - return data - # Remap the file just in case - size = os.fstat(self.fd).st_size - self.mm = mmap.mmap(self.fd, size, access=self.access) - hit = self.mm.find("\n", self.pos) + 1 - if hit: - # Got a whole line after remapping - data = self.mm[self.pos:hit] - self.pos = hit - return data - # Read whatever we've got -- may be empty - data = self.mm[self.pos:self.mm.size()] - if not data: - raise StopIteration - self.pos += len(data) - return data - - def readlines(self, sizehint=0): - return list(iter(self.readline, "")) - - def write(self, data): - end = self.pos + len(data) - try: - self.mm[self.pos:end] = data - # This can raise IndexError on Windows, ValueError on Unix - except (IndexError, ValueError): - # XXX On Unix, this resize() call doesn't work - self.mm.resize(end) - self.mm[self.pos:end] = data - self.pos = end - - def writelines(self, lines): - filter(self.write, lines) - -class DiskFile(object): - - """Standard I/O basis stream using os.open/close/read/write/lseek""" - - # This is not quite correct, since more letters are allowed after - # these. However, the following are the only starting strings allowed - # in the mode parameter. 
- modes = { - 'r' : os.O_RDONLY, - 'rb' : os.O_RDONLY, - 'rU' : os.O_RDONLY, - 'U' : os.O_RDONLY, - 'w' : os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - 'wb' : os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - 'a' : os.O_WRONLY | os.O_CREAT | os.O_EXCL, - 'ab' : os.O_WRONLY | os.O_CREAT | os.O_EXCL, - 'r+' : os.O_RDWR, - 'rb+': os.O_RDWR, - 'r+b': os.O_RDWR, - 'w+' : os.O_RDWR | os.O_CREAT | os.O_TRUNC, - 'wb+': os.O_RDWR | os.O_CREAT | os.O_TRUNC, - 'w+b': os.O_RDWR | os.O_CREAT | os.O_TRUNC, - 'a+' : os.O_RDWR | os.O_CREAT | os.O_EXCL, - 'ab+': os.O_RDWR | os.O_CREAT | os.O_EXCL, - 'a+b': os.O_RDWR | os.O_CREAT | os.O_EXCL, - } - def __init__(self, filename, mode="r"): - self.filename = filename - self.mode = mode - try: - flag = DiskFile.modes[mode] - except KeyError: - raise ValueError, "mode should be 'r', 'r+', 'w', 'w+' or 'a+'" - - O_BINARY = getattr(os, "O_BINARY", 0) - flag |= O_BINARY - try: - self.fd = os.open(filename, flag) - except OSError: - # Opening in mode 'a' or 'a+' and file already exists - flag = flag & (os.O_RDWR | O_BINARY) - self.fd = os.open(filename, flag) - if mode[0] == 'a': - os.lseek(self.fd, 0, 2) # Move to end of file - - def seek(self, offset, whence=0): - os.lseek(self.fd, offset, whence) - - def tell(self): - return os.lseek(self.fd, 0, 1) - - def read(self, n): - return os.read(self.fd, n) - - def write(self, data): - while data: - n = os.write(self.fd, data) - data = data[n:] - - def close(self): - fd = self.fd - if fd is not None: - self.fd = None - os.close(fd) - - def truncate(self, size=None): - if size is None: - size = self.tell() - if os.name == 'posix': - os.ftruncate(self.fd, size) - else: - raise NotImplementedError - - def isatty(self): - if os.name == 'posix': - return os.isatty(self.fd) - else: - raise NotImplementedError - - def fileno(): - return self.fd - - def __del__(self): - try: - self.close() - except: - pass - -class TextInputFilter(Stream): - - """Filtering input stream for universal newline translation.""" - - def 
__init__(self, base): - self.base = base # must implement read, may implement tell, seek - self.atcr = False # Set when last char read was \r - self.buf = "" # Optional one-character read-ahead buffer - self.close = base.close - self.CR = False - self.NL = False - self.CRLF = False - - def __getattr__(self, name): - if name == 'newlines': - foundchars = self.CR * 1 + self.NL * 2 + self.CRLF * 4 - if not foundchars: - return None - if foundchars in [1, 2, 4]: - if self.CR: - return '\r' - elif self.NL: - return '\n' - else: - return '\r\n' - else: - result = [] - if self.CR: - result.append('\r') - if self.NL: - result.append('\n') - if self.CRLF: - result.append('\r\n') - return tuple(result) - - def read(self, n): - """Read up to n bytes.""" - if n <= 0: - return "" - if self.buf: - assert not self.atcr - data = self.buf - self.buf = "" - return data - data = self.base.read(n) - - # The following whole ugly mess is because we need to keep track of - # exactly which line separators we have seen for self.newlines, - # grumble, grumble. This has an interesting corner-case. - # - # Consider a file consisting of exactly one line ending with '\r'. - # The first time you read(), you will not know whether it is a - # CR separator or half of a CRLF separator. Neither will be marked - # as seen, since you are waiting for your next read to determine - # what you have seen. But there's no more to read ... 
- - if self.atcr: - if data.startswith("\n"): - data = data[1:] - self.CRLF = True - if not data: - data = self.base.read(n) - else: - self.CR = True - self.atcr = False - - for i in range(len(data)): - if data[i] == '\n': - if i > 0 and data[i-1] == '\r': - self.CRLF = True - else: - self.NL = True - elif data[i] == '\r': - if i < len(data)-1 and data[i+1] != '\n': - self.CR = True - - if "\r" in data: - self.atcr = data.endswith("\r") - data = data.replace("\r\n", "\n").replace("\r", "\n") - - return data - - def seek(self, offset, whence=0): - """Seeks based on knowledge that does not come from a tell() - may go to the wrong place, since the number of - characters seen may not match the number of characters - that are actually in the file (where \r\n is the - line separator). Arithmetics on the result - of a tell() that moves beyond a newline character may in the - same way give the wrong result. - """ - self.base.seek(offset, whence) - self.atcr = False - self.buf = "" - - def tell(self): - pos = self.base.tell() - if self.atcr: - # Must read the next byte to see if it's \n, - # because then we must report the next position. 
- assert not self.buf - self.buf = self.base.read(1) - pos += 1 - self.atcr = False - if self.buf == "\n": - self.buf = "" - return pos - len(self.buf) - -class TextOutputFilter(Stream): - - """Filtering output stream for universal newline translation.""" - - def __init__(self, base, linesep=os.linesep): - assert linesep in ["\n", "\r\n", "\r"] - self.base = base # must implement write, may implement seek, tell - self.linesep = linesep - self.close = base.close - - def write(self, data): - if self.linesep is not "\n" and "\n" in data: - data = data.replace("\n", self.linesep) - self.base.write(data) - - def seek(self, offset, whence=0): - self.base.seek(offset, whence) - - def tell(self): - return self.base.tell() - -class DecodingInputFilter(Stream): - - """Filtering input stream that decodes an encoded file.""" - - def __init__(self, base, encoding="utf8", errors="strict"): - self.base = base - self.encoding = encoding - self.errors = errors - self.tell = base.tell - self.seek = base.seek - self.close = base.close - - def read(self, n): - """Read *approximately* n bytes, then decode them. - - Under extreme circumstances, - the return length could be longer than n! - - Always return a unicode string. - - This does *not* translate newlines; - you can stack TextInputFilter. - """ - data = self.base.read(n) - try: - return data.decode(self.encoding, self.errors) - except ValueError: - # XXX Sigh. decode() doesn't handle incomplete strings well. - # Use the retry strategy from codecs.StreamReader. 
- for i in range(9): - more = self.base.read(1) - if not more: - raise - data += more - try: - return data.decode(self.encoding, self.errors) - except ValueError: - pass - raise - -class EncodingOutputFilter(Stream): - - """Filtering output stream that writes to an encoded file.""" - - def __init__(self, base, encoding="utf8", errors="strict"): - self.base = base - self.encoding = encoding - self.errors = errors - self.tell = base.tell - self.seek = base.seek - self.close = base.close - - def write(self, chars): - if isinstance(chars, str): - chars = unicode(chars) # Fail if it's not ASCII - self.base.write(chars.encode(self.encoding, self.errors)) diff --git a/pypy/appspace/sre_parse.py b/pypy/appspace/sre_parse.py deleted file mode 100644 index db2982f6af..0000000000 --- a/pypy/appspace/sre_parse.py +++ /dev/null @@ -1,742 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert re-style regular expression to sre pattern -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the sre.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# XXX: show string offset and offending character for all errors - -# this module works under 1.5.2 and later. 
don't use string methods -import string, sys - -from sre_constants import * - -SPECIAL_CHARS = ".\\[{()*+?^$|" -REPEAT_CHARS = "*+?{" - -DIGITS = tuple("0123456789") - -OCTDIGITS = tuple("01234567") -HEXDIGITS = tuple("0123456789abcdefABCDEF") - -WHITESPACE = tuple(" \t\n\r\v\f") - -ESCAPES = { - r"\a": (LITERAL, ord("\a")), - r"\b": (LITERAL, ord("\b")), - r"\f": (LITERAL, ord("\f")), - r"\n": (LITERAL, ord("\n")), - r"\r": (LITERAL, ord("\r")), - r"\t": (LITERAL, ord("\t")), - r"\v": (LITERAL, ord("\v")), - r"\\": (LITERAL, ord("\\")) -} - -CATEGORIES = { - r"\A": (AT, AT_BEGINNING_STRING), # start of string - r"\b": (AT, AT_BOUNDARY), - r"\B": (AT, AT_NON_BOUNDARY), - r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), - r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), - r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), - r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), - r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), - r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), - r"\Z": (AT, AT_END_STRING), # end of string -} - -FLAGS = { - # standard flags - "i": SRE_FLAG_IGNORECASE, - "L": SRE_FLAG_LOCALE, - "m": SRE_FLAG_MULTILINE, - "s": SRE_FLAG_DOTALL, - "x": SRE_FLAG_VERBOSE, - # extensions - "t": SRE_FLAG_TEMPLATE, - "u": SRE_FLAG_UNICODE, -} - -# figure out best way to convert hex/octal numbers to integers -try: - int("10", 8) - atoi = int # 2.0 and later -except TypeError: - atoi = string.atoi # 1.5.2 - -class Pattern: - # master pattern object. 
keeps track of global attributes - def __init__(self): - self.flags = 0 - self.open = [] - self.groups = 1 - self.groupdict = {} - def opengroup(self, name=None): - gid = self.groups - self.groups = gid + 1 - if name is not None: - ogid = self.groupdict.get(name, None) - if ogid is not None: - raise error, ("redefinition of group name %s as group %d; " - "was group %d" % (repr(name), gid, ogid)) - self.groupdict[name] = gid - self.open.append(gid) - return gid - def closegroup(self, gid): - self.open.remove(gid) - def checkgroup(self, gid): - return gid < self.groups and gid not in self.open - -class SubPattern: - # a subpattern, in intermediate form - def __init__(self, pattern, data=None): - self.pattern = pattern - if data is None: - data = [] - self.data = data - self.width = None - def dump(self, level=0): - nl = 1 - for op, av in self.data: - print level*" " + op,; nl = 0 - if op == "in": - # member sublanguage - print; nl = 1 - for op, a in av: - print (level+1)*" " + op, a - elif op == "branch": - print; nl = 1 - i = 0 - for a in av[1]: - if i > 0: - print level*" " + "or" - a.dump(level+1); nl = 1 - i = i + 1 - elif type(av) in (type(()), type([])): - for a in av: - if isinstance(a, SubPattern): - if not nl: print - a.dump(level+1); nl = 1 - else: - print a, ; nl = 0 - else: - print av, ; nl = 0 - if not nl: print - def __repr__(self): - return repr(self.data) - def __len__(self): - return len(self.data) - def __iter__(self): - return iter(self.data) - def __delitem__(self, index): - del self.data[index] - def __getitem__(self, index): - if isinstance(index, slice): - return SubPattern(self.pattern, self.data[index]) - else: - return self.data[index] - def __setitem__(self, index, code): - self.data[index] = code - def insert(self, index, code): - self.data.insert(index, code) - def append(self, code): - self.data.append(code) - def getwidth(self): - # determine the width (min, max) for this subpattern - if self.width: - return self.width - lo = hi = 0L - 
for op, av in self.data: - if op is BRANCH: - i = sys.maxint - j = 0 - for av in av[1]: - l, h = av.getwidth() - i = min(i, l) - j = max(j, h) - lo = lo + i - hi = hi + j - elif op is CALL: - i, j = av.getwidth() - lo = lo + i - hi = hi + j - elif op is SUBPATTERN: - i, j = av[1].getwidth() - lo = lo + i - hi = hi + j - elif op in (MIN_REPEAT, MAX_REPEAT): - i, j = av[2].getwidth() - lo = lo + long(i) * av[0] - hi = hi + long(j) * av[1] - elif op in (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY): - lo = lo + 1 - hi = hi + 1 - elif op == SUCCESS: - break - self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint)) - return self.width - -class Tokenizer: - def __init__(self, string): - self.string = string - self.index = 0 - self.__next() - def __next(self): - if self.index >= len(self.string): - self.next = None - return - char = self.string[self.index] - if char[0] == "\\": - try: - c = self.string[self.index + 1] - except IndexError: - raise error, "bogus escape (end of line)" - char = char + c - self.index = self.index + len(char) - self.next = char - def match(self, char, skip=1): - if char == self.next: - if skip: - self.__next() - return 1 - return 0 - def get(self): - this = self.next - self.__next() - return this - def tell(self): - return self.index, self.next - def seek(self, index): - self.index, self.next = index - -def isident(char): - return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_" - -def isdigit(char): - return "0" <= char <= "9" - -def isname(name): - # check that group name is a valid string - if not isident(name[0]): - return False - for char in name: - if not isident(char) and not isdigit(char): - return False - return True - -def _group(escape, groups): - # check if the escape string represents a valid group - try: - gid = atoi(escape[1:]) - if gid and gid < groups: - return gid - except ValueError: - pass - return None # not a valid group - -def _class_escape(source, escape): - # handle escape code inside character class - 
code = ESCAPES.get(escape) - if code: - return code - code = CATEGORIES.get(escape) - if code: - return code - try: - if escape[1:2] == "x": - # hexadecimal escape (exactly two digits) - while source.next in HEXDIGITS and len(escape) < 4: - escape = escape + source.get() - escape = escape[2:] - if len(escape) != 2: - raise error, "bogus escape: %s" % repr("\\" + escape) - return LITERAL, atoi(escape, 16) & 0xff - elif escape[1:2] in OCTDIGITS: - # octal escape (up to three digits) - while source.next in OCTDIGITS and len(escape) < 5: - escape = escape + source.get() - escape = escape[1:] - return LITERAL, atoi(escape, 8) & 0xff - if len(escape) == 2: - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise error, "bogus escape: %s" % repr(escape) - -def _escape(source, escape, state): - # handle escape code in expression - code = CATEGORIES.get(escape) - if code: - return code - code = ESCAPES.get(escape) - if code: - return code - try: - if escape[1:2] == "x": - # hexadecimal escape - while source.next in HEXDIGITS and len(escape) < 4: - escape = escape + source.get() - if len(escape) != 4: - raise ValueError - return LITERAL, atoi(escape[2:], 16) & 0xff - elif escape[1:2] == "0": - # octal escape - while source.next in OCTDIGITS and len(escape) < 4: - escape = escape + source.get() - return LITERAL, atoi(escape[1:], 8) & 0xff - elif escape[1:2] in DIGITS: - # octal escape *or* decimal group reference (sigh) - if source.next in DIGITS: - escape = escape + source.get() - if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and - source.next in OCTDIGITS): - # got three octal digits; this is an octal escape - escape = escape + source.get() - return LITERAL, atoi(escape[1:], 8) & 0xff - # got at least one decimal digit; this is a group reference - group = _group(escape, state.groups) - if group: - if not state.checkgroup(group): - raise error, "cannot refer to open group" - return GROUPREF, group - raise ValueError - if len(escape) == 2: - return 
LITERAL, ord(escape[1]) - except ValueError: - pass - raise error, "bogus escape: %s" % repr(escape) - -def _parse_sub(source, state, nested=1): - # parse an alternation: a|b|c - - items = [] - while 1: - items.append(_parse(source, state)) - if source.match("|"): - continue - if not nested: - break - if not source.next or source.match(")", 0): - break - else: - raise error, "pattern not properly closed" - - if len(items) == 1: - return items[0] - - subpattern = SubPattern(state) - - # check if all items share a common prefix - while 1: - prefix = None - for item in items: - if not item: - break - if prefix is None: - prefix = item[0] - elif item[0] != prefix: - break - else: - # all subitems start with a common "prefix". - # move it out of the branch - for item in items: - del item[0] - subpattern.append(prefix) - continue # check next one - break - - # check if the branch can be replaced by a character set - for item in items: - if len(item) != 1 or item[0][0] != LITERAL: - break - else: - # we can store this as a character set instead of a - # branch (the compiler may optimize this even more) - set = [] - for item in items: - set.append(item[0]) - subpattern.append((IN, set)) - return subpattern - - subpattern.append((BRANCH, (None, items))) - return subpattern - -def _parse(source, state): - # parse a simple pattern - - subpattern = SubPattern(state) - - while 1: - - if source.next in ("|", ")"): - break # end of subpattern - this = source.get() - if this is None: - break # end of pattern - - if state.flags & SRE_FLAG_VERBOSE: - # skip whitespace and comments - if this in WHITESPACE: - continue - if this == "#": - while 1: - this = source.get() - if this in (None, "\n"): - break - continue - - if this and this[0] not in SPECIAL_CHARS: - subpattern.append((LITERAL, ord(this))) - - elif this == "[": - # character set - set = [] -## if source.match(":"): -## pass # handle character classes - if source.match("^"): - set.append((NEGATE, None)) - # check remaining 
characters - start = set[:] - while 1: - this = source.get() - if this == "]" and set != start: - break - elif this and this[0] == "\\": - code1 = _class_escape(source, this) - elif this: - code1 = LITERAL, ord(this) - else: - raise error, "unexpected end of regular expression" - if source.match("-"): - # potential range - this = source.get() - if this == "]": - if code1[0] is IN: - code1 = code1[1][0] - set.append(code1) - set.append((LITERAL, ord("-"))) - break - elif this: - if this[0] == "\\": - code2 = _class_escape(source, this) - else: - code2 = LITERAL, ord(this) - if code1[0] != LITERAL or code2[0] != LITERAL: - raise error, "bad character range" - lo = code1[1] - hi = code2[1] - if hi < lo: - raise error, "bad character range" - set.append((RANGE, (lo, hi))) - else: - raise error, "unexpected end of regular expression" - else: - if code1[0] is IN: - code1 = code1[1][0] - set.append(code1) - - # XXX: <fl> should move set optimization to compiler! - if len(set)==1 and set[0][0] is LITERAL: - subpattern.append(set[0]) # optimization - elif len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL: - subpattern.append((NOT_LITERAL, set[1][1])) # optimization - else: - # XXX: <fl> should add charmap optimization here - subpattern.append((IN, set)) - - elif this and this[0] in REPEAT_CHARS: - # repeat previous item - if this == "?": - min, max = 0, 1 - elif this == "*": - min, max = 0, MAXREPEAT - - elif this == "+": - min, max = 1, MAXREPEAT - elif this == "{": - here = source.tell() - min, max = 0, MAXREPEAT - lo = hi = "" - while source.next in DIGITS: - lo = lo + source.get() - if source.match(","): - while source.next in DIGITS: - hi = hi + source.get() - else: - hi = lo - if not source.match("}"): - subpattern.append((LITERAL, ord(this))) - source.seek(here) - continue - if lo: - min = atoi(lo) - if hi: - max = atoi(hi) - if max < min: - raise error, "bad repeat interval" - else: - raise error, "not supported" - # figure out which item to repeat - if 
subpattern: - item = subpattern[-1:] - else: - item = None - if not item or (len(item) == 1 and item[0][0] == AT): - raise error, "nothing to repeat" - if item[0][0] in (MIN_REPEAT, MAX_REPEAT): - raise error, "multiple repeat" - if source.match("?"): - subpattern[-1] = (MIN_REPEAT, (min, max, item)) - else: - subpattern[-1] = (MAX_REPEAT, (min, max, item)) - - elif this == ".": - subpattern.append((ANY, None)) - - elif this == "(": - group = 1 - name = None - if source.match("?"): - group = 0 - # options - if source.match("P"): - # python extensions - if source.match("<"): - # named group: skip forward to end of name - name = "" - while 1: - char = source.get() - if char is None: - raise error, "unterminated name" - if char == ">": - break - name = name + char - group = 1 - if not isname(name): - raise error, "bad character in group name" - elif source.match("="): - # named backreference - name = "" - while 1: - char = source.get() - if char is None: - raise error, "unterminated name" - if char == ")": - break - name = name + char - if not isname(name): - raise error, "bad character in group name" - gid = state.groupdict.get(name) - if gid is None: - raise error, "unknown group name" - subpattern.append((GROUPREF, gid)) - continue - else: - char = source.get() - if char is None: - raise error, "unexpected end of pattern" - raise error, "unknown specifier: ?P%s" % char - elif source.match(":"): - # non-capturing group - group = 2 - elif source.match("#"): - # comment - while 1: - if source.next is None or source.next == ")": - break - source.get() - if not source.match(")"): - raise error, "unbalanced parenthesis" - continue - elif source.next in ("=", "!", "<"): - # lookahead assertions - char = source.get() - dir = 1 - if char == "<": - if source.next not in ("=", "!"): - raise error, "syntax error" - dir = -1 # lookbehind - char = source.get() - p = _parse_sub(source, state) - if not source.match(")"): - raise error, "unbalanced parenthesis" - if char == "=": - 
subpattern.append((ASSERT, (dir, p))) - else: - subpattern.append((ASSERT_NOT, (dir, p))) - continue - else: - # flags - if not source.next in FLAGS: - raise error, "unexpected end of pattern" - while source.next in FLAGS: - state.flags = state.flags | FLAGS[source.get()] - if group: - # parse group contents - if group == 2: - # anonymous group - group = None - else: - group = state.opengroup(name) - p = _parse_sub(source, state) - if not source.match(")"): - raise error, "unbalanced parenthesis" - if group is not None: - state.closegroup(group) - subpattern.append((SUBPATTERN, (group, p))) - else: - while 1: - char = source.get() - if char is None: - raise error, "unexpected end of pattern" - if char == ")": - break - raise error, "unknown extension" - - elif this == "^": - subpattern.append((AT, AT_BEGINNING)) - - elif this == "$": - subpattern.append((AT, AT_END)) - - elif this and this[0] == "\\": - code = _escape(source, this, state) - subpattern.append(code) - - else: - raise error, "parser error" - - return subpattern - -def parse(str, flags=0, pattern=None): - # parse 're' pattern into list of (opcode, argument) tuples - - source = Tokenizer(str) - - if pattern is None: - pattern = Pattern() - pattern.flags = flags - pattern.str = str - - p = _parse_sub(source, pattern, 0) - - tail = source.get() - if tail == ")": - raise error, "unbalanced parenthesis" - elif tail: - raise error, "bogus characters at end of regular expression" - - if flags & SRE_FLAG_DEBUG: - p.dump() - - if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE: - # the VERBOSE flag was switched on inside the pattern. to be - # on the safe side, we'll parse the whole thing again... 
- return parse(str, p.pattern.flags) - - return p - -def parse_template(source, pattern): - # parse 're' replacement string into list of literals and - # group references - s = Tokenizer(source) - p = [] - a = p.append - def literal(literal, p=p): - if p and p[-1][0] is LITERAL: - p[-1] = LITERAL, p[-1][1] + literal - else: - p.append((LITERAL, literal)) - sep = source[:0] - if type(sep) is type(""): - makechar = chr - else: - makechar = unichr - while 1: - this = s.get() - if this is None: - break # end of replacement string - if this and this[0] == "\\": - # group - if this == "\\g": - name = "" - if s.match("<"): - while 1: - char = s.get() - if char is None: - raise error, "unterminated group name" - if char == ">": - break - name = name + char - if not name: - raise error, "bad group name" - try: - index = atoi(name) - except ValueError: - if not isname(name): - raise error, "bad character in group name" - try: - index = pattern.groupindex[name] - except KeyError: - raise IndexError, "unknown group name" - a((MARK, index)) - elif len(this) > 1 and this[1] in DIGITS: - code = None - while 1: - group = _group(this, pattern.groups+1) - if group: - if (s.next not in DIGITS or - not _group(this + s.next, pattern.groups+1)): - code = MARK, group - break - elif s.next in OCTDIGITS: - this = this + s.get() - else: - break - if not code: - this = this[1:] - code = LITERAL, makechar(atoi(this[-6:], 8) & 0xff) - if code[0] is LITERAL: - literal(code[1]) - else: - a(code) - else: - try: - this = makechar(ESCAPES[this][1]) - except KeyError: - pass - literal(this) - else: - literal(this) - # convert template to groups and literals lists - i = 0 - groups = [] - literals = [] - for c, s in p: - if c is MARK: - groups.append((i, s)) - literals.append(None) - else: - literals.append(s) - i = i + 1 - return groups, literals - -def expand_template(template, match): - g = match.group - sep = match.string[:0] - groups, literals = template - literals = literals[:] - try: - for 
index, group in groups: - literals[index] = s = g(group) - if s is None: - raise IndexError - except IndexError: - raise error, "empty group" - return string.join(literals, sep) diff --git a/pypy/appspace/struct.py b/pypy/appspace/struct.py deleted file mode 100644 index f82e31d20b..0000000000 --- a/pypy/appspace/struct.py +++ /dev/null @@ -1,346 +0,0 @@ -import math,sys - -"""Functions to convert between Python values and C structs. -Python strings are used to hold the data representing the C struct -and also as format strings to describe the layout of data in the C struct. - -The optional first format char indicates byte order, size and alignment: - @: native order, size & alignment (default) - =: native order, std. size & alignment - <: little-endian, std. size & alignment - >: big-endian, std. size & alignment - !: same as > - -The remaining chars indicate types of args and must match exactly; -these can be preceded by a decimal repeat count: - x: pad byte (no data); - c:char; - b:signed byte; - B:unsigned byte; - h:short; - H:unsigned short; - i:int; - I:unsigned int; - l:long; - L:unsigned long; - f:float; - d:double. -Special cases (preceding decimal count indicates length): - s:string (array of char); p: pascal string (with count byte). -Special case (only available in native format): - P:an integer type that is wide enough to hold a pointer. -Special case (not in native mode unless 'long long' in platform C): - q:long long; - Q:unsigned long long -Whitespace between formats is ignored. 
- -The variable struct.error is an exception raised on errors.""" - -# TODO: XXX Find a way to get information on native sizes and alignments -class StructError(Exception): - pass -error = StructError -def unpack_int(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if le == 'little': - bytes.reverse() - number = 0 - for b in bytes: - number = number << 8 | b - return number - -def unpack_signed_int(data,index,size,le): - number = unpack_int(data,index,size,le) - max = 2**(size*8) - if number > 2**(size*8 - 1) - 1: - number = -1*(max - number) - return number - -def unpack_float(data,index,size,le): - bytes = [ord(b) for b in data[index:index+size]] - if len(bytes) != size: - raise StructError,"Not enough data to unpack" - if le == 'big': - bytes.reverse() - if size == 4: - bias = 127 - exp = 8 - prec = 23 - else: - bias = 1023 - exp = 11 - prec = 52 -# print bytes,size,index,len(data),data - mantissa = bytes[size-2] & (2**(15-exp)-1) -# print mantissa - for b in bytes[size-3::-1]: - mantissa = mantissa << 8 | b -# print mantissa - mantissa = 1 + (1.0*mantissa)/(2**(prec)) - mantissa /= 2 -# print mantissa - e = (bytes[-1] & 0x7f) << (exp - 7) - e += (bytes[size-2] >> (15 - exp)) & (2**(exp - 7) -1) - e -= bias - e += 1 - sign = bytes[-1] & 0x80 - number = math.ldexp(mantissa,e) -# print number,index,mantissa,e,bytes#,data - if sign : number *= -1 - return number - -def unpack_char(data,index,size,le): - return data[index:index+size] - -def pack_int(number,size,le): - x=number - res=[] - for i in range(size): - res.append(chr(x&0xff)) - x >>= 8 - if le == 'big': - res.reverse() - return ''.join(res) - -def pack_signed_int(number,size,le): - if type(number) not in [int,long]: - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): - raise OverflowError,"Number:%i to large to convert" % number - return pack_int(number,size,le) - -def pack_unsigned_int(number,size,le): - if 
type(number) not in [int,long]: - raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer" - if number < 0: - raise TypeError,"can't convert negative long to unsigned" - if number > 2**(8*size)-1: - raise OverflowError,"Number:%i to large to convert" % number - return pack_int(number,size,le) - -def pack_char(char,size,le): -# print char - return str(char) - -def sane_float(man,e): - # TODO: XXX Implement checks for floats - return True - -def pack_float(number,size,le): - - if number < 0: - sign=1 - number *= -1 - else: - sign =0 - if size == 4: - bias = 127 - exp = 8 - prec = 23 - else: - bias = 1023 - exp = 11 - prec = 52 - - man,e = math.frexp(number) - if 0.5 <= man and man < 1.0: - man *= 2 - e -= 1 - if sane_float(man,e): - man -= 1 - e += bias - mantissa = int(2**prec *(man) +0.5) - res=[] - if mantissa >> prec : - mantissa = 0 - e += 1 - - for i in range(size-2): - res += [ mantissa & 0xff] - mantissa >>= 8 - res += [ (mantissa & (2**(15-exp)-1)) | ((e & (2**(exp-7)-1))<<(15-exp))] - res += [sign << 7 | e >> (exp - 7)] - if le == 'big': - res.reverse() - return ''.join([chr(x) for x in res]) - # TODO: What todo with insane floats/doubles. handle in sanefloat? 
- -big_endian_format = { - 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, - 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, - 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, - 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, - 'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, - } -default = big_endian_format -formatmode={ '<' : (default, 'little'), - '>' : (default, 'big'), - '!' 
: (default, 'big'), - '=' : (default, sys.byteorder), - '@' : (default, sys.byteorder) - } - -def getmode(fmt): - try: - formatdef,endianness = formatmode[fmt[0]] - index = 1 - except KeyError: - formatdef,endianness = formatmode['@'] - index = 0 - return formatdef,endianness,index -def getNum(fmt,i): - num=None - cur = fmt[i] - while ('0'<= cur ) and ( cur <= '9'): - if num == None: - num = int(cur) - else: - num = 10*num + int(cur) - i += 1 - cur = fmt[i] - return num,i - -def calcsize(fmt): - """calcsize(fmt) -> int - Return size of C struct described by format string fmt. - See struct.__doc__ for more on format strings.""" - - formatdef,endianness,i = getmode(fmt) - num = 0 - result = 0 - while i<len(fmt): - num,i = getNum(fmt,i) - cur = fmt[i] - try: - format = formatdef[cur] - except KeyError: - raise StructError,"%s is not a valid format"%cur - if num != None : - result += num*format['size'] - else: - result += format['size'] - num = 0 - i += 1 - return result - -def pack(fmt,*args): - """pack(fmt, v1, v2, ...) -> string - Return string containing values v1, v2, ... packed according to fmt. 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) - args = list(args) - n_args = len(args) - result = [] - while i<len(fmt): - num,i = getNum(fmt,i) - cur = fmt[i] - try: - format = formatdef[cur] - except KeyError: - raise StructError,"%s is not a valid format"%cur - if num == None : - num_s = 0 - num = 1 - else: - num_s = num - - if cur == 'x': - result += ['\0'*num] - elif cur == 's': - if type(args[0]) == str: - padding = num - len(args[0]) - if padding >=0: - result += [args[0][:num] + '\0'*padding] - else: - result += [args[0][:num]] - args.pop() - else: - raise StructError,"arg for string format not a string" - elif cur == 'p': - if type(args[0]) == str: - padding = num - len(args[0]) - 1 - - if padding > 0: - result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding] - else: - if num<255: - result += [chr(num-1) + args[0][:num-1]] - else: - result += [chr(255) + args[0][:num-1]] - args.pop() - else: - raise StructError,"arg for string format not a string" - - else: - if len(args) == 0: - raise StructError,"insufficient arguments to pack" - for var in args[:num]: - result += [format['pack'](var,format['size'],endianness)] - args=args[num:] - num = None - i += 1 - if len(args) != 0: - raise StructError,"too many arguments for pack format" - return ''.join(result) - -def unpack(fmt,data): - """unpack(fmt, string) -> (v1, v2, ...) - Unpack the string, containing packed C structure data, according - to fmt. Requires len(string)==calcsize(fmt). 
- See struct.__doc__ for more on format strings.""" - formatdef,endianness,i = getmode(fmt) -# print fmt,data - j = 0 - num = 0 - result = [] - length= calcsize(fmt) - if length != len (data): - raise StructError,"unpack str size does not match format" - while i<len(fmt): - num,i=getNum(fmt,i) - cur = fmt[i] - try: - format = formatdef[cur] - except KeyError: - raise StructError,"%s is not a valid format"%cur - - if not num : - num = 1 - if cur == 'x': - j += num - i += 1 - elif cur == 's': - result.append(data[j:j+num]) - j += num - i += 1 - elif cur == 'p': - n=ord(data[j]) - if n >= num: - n = num-1 - result.append(data[j+1:j+n+1]) - j += num - i += 1 - else: - for n in range(num): - result += [format['unpack'](data,j,format['size'],endianness)] - j += format['size'] - i += 1 - - return result - -if __name__ == '__main__': - print pack_float(1.23,4,'little') - import struct - print struct.pack('f',1.23), pack('f',1.23) - print unpack('f',pack('f',1.23))
\ No newline at end of file diff --git a/pypy/appspace/test/__init__.py b/pypy/appspace/test/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/appspace/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/appspace/test/autopath.py b/pypy/appspace/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/appspace/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' 
in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/appspace/test/builtin_functions_test.py b/pypy/appspace/test/builtin_functions_test.py deleted file mode 100644 index d7cdce6c8f..0000000000 --- a/pypy/appspace/test/builtin_functions_test.py +++ /dev/null @@ -1,1234 +0,0 @@ -# Python test set -- built-in functions -import autopath -from pypy.interpreter.gateway import app2interp_temp - -def app_init_globals(): - ''' support 
functionality for these tests ''' - import __builtin__ as b - - from sets import Set - from support_tests import fcmp, have_unicode, TESTFN, unlink - - if not have_unicode: - b.basestring = str - - import sys, cStringIO - - class Squares: - - def __init__(self, max): - self.max = max - self.sofar = [] - - def __len__(self): return len(self.sofar) - - def __getitem__(self, i): - if not 0 <= i < self.max: raise IndexError - n = len(self.sofar) - while n <= i: - self.sofar.append(n*n) - n += 1 - return self.sofar[i] - - class StrSquares: - - def __init__(self, max): - self.max = max - self.sofar = [] - - def __len__(self): - return len(self.sofar) - - def __getitem__(self, i): - if not 0 <= i < self.max: - raise IndexError - n = len(self.sofar) - while n <= i: - self.sofar.append(str(n*n)) - n += 1 - return self.sofar[i] - - class BitBucket: - def write(self, line): - pass - - L = [ - ('0', 0), - ('1', 1), - ('9', 9), - ('10', 10), - ('99', 99), - ('100', 100), - ('314', 314), - (' 314', 314), - ('314 ', 314), - (' \t\t 314 \t\t ', 314), - (`sys.maxint`, sys.maxint), - (' 1x', ValueError), - (' 1 ', 1), - (' 1\02 ', ValueError), - ('', ValueError), - (' ', ValueError), - (' \t\t ', ValueError) - ] - if have_unicode: - L += [ - (unicode('0'), 0), - (unicode('1'), 1), - (unicode('9'), 9), - (unicode('10'), 10), - (unicode('99'), 99), - (unicode('100'), 100), - (unicode('314'), 314), - (unicode(' 314'), 314), - (unicode('\u0663\u0661\u0664 ','raw-unicode-escape'), 314), - (unicode(' \t\t 314 \t\t '), 314), - (unicode(' 1x'), ValueError), - (unicode(' 1 '), 1), - (unicode(' 1\02 '), ValueError), - (unicode(''), ValueError), - (unicode(' '), ValueError), - (unicode(' \t\t '), ValueError), - (unichr(0x200), ValueError), - ] - b.Set = Set - b.fcmp = fcmp - b.have_unicode = have_unicode - b.TESTFN = TESTFN - b.unlink = unlink - b.sys = sys - b.cStringIO = cStringIO - b.Squares = Squares - b.StrSquares = StrSquares - b.BitBucket = BitBucket - b.L = L - - -class AppTestBuiltin: 
- objspacename = 'std' - - full_test = 1 - - def setup_class(cls): - app2interp_temp(app_init_globals)(cls.space) - - # we use "if 1:" to keep all method definitions indented, making - # it maximally easy to edit this file to pick and choose which - # ones to run (running everything takes 4 minutes or so...) - if 1: - def test_import(self): - __import__('sys') - __import__('time') - __import__('string') - raises(ImportError, __import__, 'spamspam') - raises(TypeError, __import__, 1, 2, 3, 4) - - def test_abs(self): - # int - assert abs(0) == 0 - assert abs(1234) == 1234 - assert abs(-1234) == 1234 - # float - assert abs(0.0) == 0.0 - assert abs(3.14) == 3.14 - assert abs(-3.14) == 3.14 - # long - assert abs(0L) == 0L - assert abs(1234L) == 1234L - assert abs(-1234L) == 1234L - # str - raises(TypeError, abs, 'a') - - def test_apply(self): - def f0(*args): - assert args == () - def f1(a1): - assert a1 == 1 - def f2(a1, a2): - assert a1 == 1 - assert a2 == 2 - def f3(a1, a2, a3): - assert a1 == 1 - assert a2 == 2 - assert a3 == 3 - apply(f0, ()) - apply(f1, (1,)) - apply(f2, (1, 2)) - apply(f3, (1, 2, 3)) - - # A PyCFunction that takes only positional parameters should allow - # an empty keyword dictionary to pass without a complaint, but - # raise a TypeError if the dictionary is non-empty. 
- apply(id, (1,), {}) - raises(TypeError, apply, id, (1,), {"foo": 1}) - raises(TypeError, apply) - raises(TypeError, apply, id, 42) - raises(TypeError, apply, id, (42,), 42) - - def test_callable(self): - assert callable(len) - def f(): pass - assert callable(f) - class C: - def meth(self): pass - assert callable(C) - x = C() - assert callable(x.meth) - assert not callable(x) - class D(C): - def __call__(self): pass - y = D() - assert callable(y) - y() - - def test_chr(self): - assert chr(32) == ' ' - assert chr(65) == 'A' - assert chr(97) == 'a' - assert chr(0xff) == '\xff' - raises(ValueError, chr, 256) - raises(TypeError, chr) - - def test_cmp(self): - assert cmp(-1, 1) == -1 - assert cmp(1, -1) == 1 - assert cmp(1, 1) == 0 - ''' TODO XXX Circular objects not handled yet - # verify that circular objects are handled - a = []; a.append(a) - b = []; b.append(b) - from UserList import UserList - c = UserList(); c.append(c) - assert cmp(a, b) == 0 - assert cmp(b, c) == 0 - assert cmp(c, a) == 0 - assert cmp(a, c) == 0 - # okay, now break the cycles - a.pop(); b.pop(); c.pop() - ''' - raises(TypeError, cmp) - - ''' TODO: XXX Coerce is not implemented - def test_coerce(self): - assert not fcmp(coerce(1, 1.1), (1.0, 1.1)) - assert coerce(1, 1L) == (1L, 1L) - assert not fcmp(coerce(1L, 1.1), (1.0, 1.1)) - raises(TypeError, coerce) - class BadNumber: - def __coerce__(self, other): - raise ValueError - raises(ValueError, coerce, 42, BadNumber()) - raises(OverflowError, coerce, 0.5, int("12345" * 1000)) - ''' - - def test_compile(self): - compile('print 1\n', '', 'exec') - bom = '\xef\xbb\xbf' - compile(bom + 'print 1\n', '', 'exec') - raises(TypeError, compile) - raises(ValueError, compile, - 'print 42\n', '<string>', 'badmode') - raises(ValueError, compile, - 'print 42\n', '<string>', 'single', 0xff) - if have_unicode: - compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - - def test_delattr(self): - import sys - sys.spam = 1 - delattr(sys, 'spam') - 
raises(TypeError, delattr) - - def test_dir(self): - x = 1 - assert 'x' in dir() - import sys - assert 'modules' in dir(sys) - raises(TypeError, dir, 42, 42) - - def test_divmod(self): - assert divmod(12, 7) == (1, 5) - assert divmod(-12, 7) == (-2, 2) - assert divmod(12, -7) == (-2, -2) - assert divmod(-12, -7) == (1, -5) - - assert divmod(12L, 7L) == (1L, 5L) - assert divmod(-12L, 7L) == (-2L, 2L) - assert divmod(12L, -7L) == (-2L, -2L) - assert divmod(-12L, -7L) == (1L, -5L) - - assert divmod(12, 7L) == (1, 5L) - assert divmod(-12, 7L) == (-2, 2L) - assert divmod(12L, -7) == (-2L, -2) - assert divmod(-12L, -7) == (1L, -5) - - assert not fcmp(divmod(3.25, 1.0), (3.0, 0.25)) - assert not fcmp(divmod(-3.25, 1.0), (-4.0, 0.75)) - assert not fcmp(divmod(3.25, -1.0), (-4.0, -0.75)) - assert not fcmp(divmod(-3.25, -1.0), (3.0, -0.25)) - - raises(TypeError, divmod) - - ''' XXX TODO No eval() support yet - def test_eval(self): - assert eval('1+1') == 2 - assert eval(' 1+1\n') == 2 - globals = {'a': 1, 'b': 2} - locals = {'b': 200, 'c': 300} - assert eval('a', globals) == 1 - assert eval('a', globals, locals) == 1 - assert eval('b', globals, locals) == 200 - assert eval('c', globals, locals) == 300 - if have_unicode: - assert eval(unicode('1+1')) == 2 - assert eval(unicode(' 1+1\n')) == 2 - globals = {'a': 1, 'b': 2} - locals = {'b': 200, 'c': 300} - if have_unicode: - assert eval(unicode('a'), globals) == 1 - assert eval(unicode('a'), globals, locals) == 1 - assert eval(unicode('b'), globals, locals) == 200 - assert eval(unicode('c'), globals, locals) == 300 - bom = '\xef\xbb\xbf' - assert eval(bom + 'a', globals, locals) == 1 - assert eval(unicode('u"\xc3\xa5"', 'utf8'), globals) == ( - unicode('\xc3\xa5', 'utf8')) - raises(TypeError, eval) - raises(TypeError, eval, ()) - - '\'' XXX TODO: Figure out later - # Done outside of the method test_z to get the correct scope - z = 0 - f = open(TESTFN, 'w') - f.write('z = z+1\n') - f.write('z = z*2\n') - f.close() - 
execfile(TESTFN) - - def test_execfile(self): - globals = {'a': 1, 'b': 2} - locals = {'b': 200, 'c': 300} - - assert self.__class__.z == 2 - globals['z'] = 0 - execfile(TESTFN, globals) - assert globals['z'] == 2 - locals['z'] = 0 - execfile(TESTFN, globals, locals) - assert locals['z'] == 2 - unlink(TESTFN) - raises(TypeError, execfile) - import os - raises(IOError, execfile, os.curdir) - raises(IOError, execfile, "I_dont_exist") - '\'' - ''' - - ''' XXX TODO: filter does NOT rely on __getitem__, but rather on - __iter__; it appears to me that the following two tests, - therefore, pass in CPython only because of the accident that - in that implementation str does not define __iter__ (while - list and tuple do, in 2.3). Probably best to substitute - most of these tests with more appropriate ones! - ''' - def test_filter(self): - assert filter(lambda c: 'a' <= c <= 'z', 'Hello World') == ( - 'elloorld') - assert (filter(None, [1, 'hello', [], [3], '', None, 9, 0]) - ) == [1, 'hello', [3], 9] - assert filter(lambda x: x > 0, [1, -3, 9, 0, 2]) == ( - [1, 9, 2]) - assert filter(None, Squares(10)) == ( - [1, 4, 9, 16, 25, 36, 49, 64, 81]) - assert filter(lambda x: x%2, Squares(10)) == ( - [1, 9, 25, 49, 81]) - def identity(item): - return 1 - filter(identity, Squares(5)) - raises(TypeError, filter) - ''' XXX rest of test disabled as above explained - class BadSeq(object): - def __getitem__(self, index): - if index<4: - return 42 - raise ValueError - raises(ValueError, filter, lambda x: x, BadSeq()) - def badfunc(): - pass - raises(TypeError, filter, badfunc, range(5)) - - # test bltinmodule.c::filtertuple() - assert filter(None, (1, 2)) == (1, 2) - assert filter(lambda x: x>=3, (1, 2, 3, 4)) == (3, 4) - raises(TypeError, filter, 42, (1, 2)) - - # test bltinmodule.c::filterstring() - assert filter(None, "12") == "12" - assert filter(lambda x: x>="3", "1234") == "34" - raises(TypeError, filter, 42, "12") - class badstr(str): - def __getitem__(self, index): - raise 
ValueError - raises(ValueError, filter, - lambda x: x >="3", badstr("1234")) - - class badstr2(str): - def __getitem__(self, index): - return 42 - raises(TypeError, filter, - lambda x: x >=42, badstr2("1234")) - - class weirdstr(str): - def __getitem__(self, index): - return weirdstr(2*str.__getitem__(self, index)) - assert filter(lambda x: x>="33", weirdstr("1234")) == ( - "3344") - - class shiftstr(str): - def __getitem__(self, index): - return chr(ord(str.__getitem__(self, index))+1) - assert filter(lambda x: x>="3", shiftstr("1234")) == "345" - - if have_unicode: - # test bltinmodule.c::filterunicode() - assert filter(None, unicode("12")) == unicode("12") - assert filter(lambda x: x>="3", unicode("1234")) == ( - unicode("34")) - raises(TypeError, filter, 42, unicode("12")) - raises(ValueError, filter, lambda x: x >="3", - badstr(unicode("1234"))) - - class badunicode(unicode): - def __getitem__(self, index): - return 42 - raises(TypeError, filter, lambda x: x >=42, - badunicode("1234")) - - class weirdunicode(unicode): - def __getitem__(self, index): - return weirdunicode(2*unicode.__getitem__(self, index)) - assert ( - filter(lambda x: x>=unicode("33"), weirdunicode("1234"))) == ( - unicode("3344")) - - class shiftunicode(unicode): - def __getitem__(self, index): - return unichr(ord(unicode.__getitem__(self, index))+1) - assert ( - filter(lambda x: x>=unicode("3"), shiftunicode("1234"))) == ( - unicode("345") - ) - - def test_filter_subclasses(self): - # test that filter() never returns tuple, str or unicode subclasses - # and that the result always goes through __getitem__ - funcs = (None, bool, lambda x: True) - class tuple2(tuple): - def __getitem__(self, index): - return 2*tuple.__getitem__(self, index) - class str2(str): - def __getitem__(self, index): - return 2*str.__getitem__(self, index) - inputs = { - tuple2: {(): (), (1, 2, 3): (2, 4, 6)}, - str2: {"": "", "123": "112233"} - } - if have_unicode: - class unicode2(unicode): - def __getitem__(self, 
index): - return 2*unicode.__getitem__(self, index) - inputs[unicode2] = { - unicode(): unicode(), - unicode("123"): unicode("112233") - } - - for (cls, inps) in inputs.iteritems(): - for (inp, exp) in inps.iteritems(): - # make sure the output goes through __getitem__ - # even if func is None - assert ( - filter(funcs[0], cls(inp))) == ( - filter(funcs[1], cls(inp)) - ) - for func in funcs: - outp = filter(func, cls(inp)) - assert outp == exp - assert not isinstance(outp, cls) - ''' - - def test_float(self): - assert float(3.14) == 3.14 - assert float(314) == 314.0 - assert float(314L) == 314.0 - assert float(" 3.14 ") == 3.14 - if have_unicode: - assert float(unicode(" 3.14 ")) == 3.14 - assert float(unicode( - " \u0663.\u0661\u0664 ",'raw-unicode-escape')) == 3.14 - - def test_getattr(self): - import sys - assert getattr(sys, 'stdout') is sys.stdout - raises(TypeError, getattr, sys, 1) - raises(TypeError, getattr, sys, 1, "foo") - raises(TypeError, getattr) - if have_unicode: - raises(UnicodeError, getattr, sys, - unichr(sys.maxunicode)) - - def test_hasattr(self): - import sys - assert hasattr(sys, 'stdout') - raises(TypeError, hasattr, sys, 1) - raises(TypeError, hasattr) - if have_unicode: - raises(UnicodeError, hasattr, sys, - unichr(sys.maxunicode)) - - def test_hash(self): - hash(None) - assert hash(1) == hash(1L) - assert hash(1) == hash(1.0) - hash('spam') - if have_unicode: - assert hash('spam') == hash(unicode('spam')) - hash((0,1,2,3)) - def f(): pass - raises(TypeError, hash, []) - raises(TypeError, hash, {}) - - def test_hex(self): - assert hex(16) == '0x10' - assert hex(16L) == '0x10L' - assert len(hex(-1)) == len(hex(sys.maxint)) - assert hex(-16) in ('0xfffffff0', '0xfffffffffffffff0') - assert hex(-16L) == '-0x10L' - raises(TypeError, hex, {}) - - def test_id(self): - id(None) - id(1) - id(1L) - id(1.0) - id('spam') - id((0,1,2,3)) - id([0,1,2,3]) - id({'spam': 1, 'eggs': 2, 'ham': 3}) - - # Test input() later, together with raw_input - - def 
test_int(self): - assert int(314) == 314 - assert int(3.14) == 3 - assert int(314L) == 314 - # Check that conversion from float truncates towards zero - assert int(-3.14) == -3 - assert int(3.9) == 3 - assert int(-3.9) == -3 - assert int(3.5) == 3 - assert int(-3.5) == -3 - # Different base: - assert int("10",16) == 16L - if have_unicode: - assert int(unicode("10"),16) == 16L - # Test conversion from strings and various anomalies - for s, v in L: - for sign in "", "+", "-": - for prefix in "", " ", "\t", " \t\t ": - ss = prefix + sign + s - vv = v - if sign == "-" and v is not ValueError: - vv = -v - try: - assert int(ss) == vv - except v: - pass - - s = `-1-sys.maxint` - assert int(s)+1 == -sys.maxint - # should return long - ''' XXX TODO: Longs not well supported yet - int(s[1:]) - - # should return long - x = int(1e100) - assert isinstance(x, long) - x = int(-1e100) - assert isinstance(x, long) - ''' - - # SF bug 434186: 0x80000000/2 != 0x80000000>>1. - # Worked by accident in Windows release build, but failed in - # debug build. Failed in all Linux builds. 
- x = -1-sys.maxint - assert x >> 1 == x//2 - - raises(ValueError, int, '123\0') - raises(ValueError, int, '53', 40) - - ''' XXX TODO: Longs not supported yet - x = int('1' * 600) - assert isinstance(x, long) - - if have_unicode: - x = int(unichr(0x661) * 600) - assert isinstance(x, long) - - raises(TypeError, int, 1, 12) - ''' - - assert int('0123', 0) == 83 - - def test_intern(self): - raises(TypeError, intern) - s = "never interned before" - assert intern(s) is s - s2 = s.swapcase().swapcase() - assert intern(s2) is s - - def test_iter(self): - raises(TypeError, iter) - raises(TypeError, iter, 42, 42) - lists = [("1", "2"), ["1", "2"], "12"] - if have_unicode: - lists.append(unicode("12")) - for l in lists: - i = iter(l) - assert i.next() == '1' - assert i.next() == '2' - raises(StopIteration, i.next) - - def test_isinstance(self): - class C: - pass - class D(C): - pass - class E: - pass - c = C() - d = D() - e = E() - assert isinstance(c, C) - assert isinstance(d, C) - assert not isinstance(e, C) - assert not isinstance(c, D) - assert not isinstance('foo', E) - raises(TypeError, isinstance, E, 'foo') - raises(TypeError, isinstance) - - def test_issubclass(self): - class C: - pass - class D(C): - pass - class E: - pass - c = C() - d = D() - e = E() - assert issubclass(D, C) - assert issubclass(C, C) - assert not issubclass(C, D) - raises(TypeError, issubclass, 'foo', E) - raises(TypeError, issubclass, E, 'foo') - raises(TypeError, issubclass) - - def test_len(self): - assert len('123') == 3 - assert len(()) == 0 - assert len((1, 2, 3, 4)) == 4 - assert len([1, 2, 3, 4]) == 4 - assert len({}) == 0 - assert len({'a':1, 'b': 2}) == 2 - class BadSeq: - def __len__(self): - raise ValueError - raises(ValueError, len, BadSeq()) - - if 1: - - def test_list(self): - assert list([]) == [] - l0_3 = [0, 1, 2, 3] - l0_3_bis = list(l0_3) - assert l0_3 == l0_3_bis - assert l0_3 is not l0_3_bis - assert list(()) == [] - assert list((0, 1, 2, 3)) == [0, 1, 2, 3] - assert 
list('') == [] - assert list('spam') == ['s', 'p', 'a', 'm'] - - ''' XXX TODO: disabled for now -- far too slow! - if sys.maxint == 0x7fffffff: - # This test can currently only work on 32-bit machines. - # XXX If/when PySequence_Length() returns a ssize_t, it should be - # XXX re-enabled. - # Verify clearing of bug #556025. - # This assumes that the max data size (sys.maxint) == max - # address size this also assumes that the address size is at - # least 4 bytes with 8 byte addresses, the bug is not well - # tested - # - # Note: This test is expected to SEGV under Cygwin 1.3.12 or - # earlier due to a newlib bug. See the following mailing list - # thread for the details: - - # http://sources.redhat.com/ml/newlib/2002/msg00369.html - raises(MemoryError, list, xrange(sys.maxint // 2)) - ''' - - ''' XXX TODO: disabled for now -- long not yet well supported - def test_long(self): - assert long(314) == 314L - assert long(3.14) == 3L - assert long(314L) == 314L - # Check that conversion from float truncates towards zero - assert long(-3.14) == -3L - assert long(3.9) == 3L - assert long(-3.9) == -3L - assert long(3.5) == 3L - assert long(-3.5) == -3L - assert long("-3") == -3L - if have_unicode: - assert long(unicode("-3")) == -3L - # Different base: - assert long("10",16) == 16L - if have_unicode: - assert long(unicode("10"),16) == 16L - # Check conversions from string (same test set as for int(), and then some) - LL = [ - ('1' + '0'*20, 10L**20), - ('1' + '0'*100, 10L**100) - ] - L2 = L[:] - if have_unicode: - L2 += [ - (unicode('1') + unicode('0')*20, 10L**20), - (unicode('1') + unicode('0')*100, 10L**100), - ] - for s, v in L2 + LL: - for sign in "", "+", "-": - for prefix in "", " ", "\t", " \t\t ": - ss = prefix + sign + s - vv = v - if sign == "-" and v is not ValueError: - vv = -v - try: - assert long(ss) == long(vv) - except v: - pass - - raises(ValueError, long, '123\0') - raises(ValueError, long, '53', 40) - raises(TypeError, long, 1, 12) - ''' - - def 
test_map(self): - assert ( - map(None, 'hello world')) == ( - ['h','e','l','l','o',' ','w','o','r','l','d'] - ) - assert ( - map(None, 'abcd', 'efg')) == ( - [('a', 'e'), ('b', 'f'), ('c', 'g'), ('d', None)] - ) - assert ( - map(None, range(10))) == ( - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - ) - assert ( - map(lambda x: x*x, range(1,4))) == ( - [1, 4, 9] - ) - try: - from math import sqrt - except ImportError: - def sqrt(x): - return pow(x, 0.5) - assert ( - map(lambda x: map(sqrt,x), [[16, 4], [81, 9]])) == ( - [[4.0, 2.0], [9.0, 3.0]] - ) - assert ( - map(lambda x, y: x+y, [1,3,2], [9,1,4])) == ( - [10, 4, 6] - ) - - def plus(*v): - accu = 0 - for i in v: accu = accu + i - return accu - assert ( - map(plus, [1, 3, 7])) == ( - [1, 3, 7] - ) - assert ( - map(plus, [1, 3, 7], [4, 9, 2])) == ( - [1+4, 3+9, 7+2] - ) - assert ( - map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])) == ( - [1+4+1, 3+9+1, 7+2+0] - ) - assert ( - map(None, Squares(10))) == ( - [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] - ) - assert ( - map(int, Squares(10))) == ( - [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] - ) - assert ( - map(None, Squares(3), Squares(2))) == ( - [(0,0), (1,1), (4,None)] - ) - assert ( - map(max, Squares(3), Squares(2))) == ( - [0, 1, 4] - ) - raises(TypeError, map) - raises(TypeError, map, lambda x: x, 42) - assert map(None, [42]) == [42] - class BadSeq: - def __getitem__(self, index): - raise ValueError - raises(ValueError, map, lambda x: x, BadSeq()) - - def test_max(self): - assert max('123123') == '3' - assert max(1, 2, 3) == 3 - assert max((1, 2, 3, 1, 2, 3)) == 3 - assert max([1, 2, 3, 1, 2, 3]) == 3 - - assert max(1, 2L, 3.0) == 3.0 - assert max(1L, 2.0, 3) == 3 - assert max(1.0, 2, 3L) == 3L - - def test_min(self): - assert min('123123') == '1' - assert min(1, 2, 3) == 1 - assert min((1, 2, 3, 1, 2, 3)) == 1 - assert min([1, 2, 3, 1, 2, 3]) == 1 - - assert min(1, 2L, 3.0) == 1 - assert min(1L, 2.0, 3) == 1L - assert min(1.0, 2, 3L) == 1.0 - - raises(TypeError, min) - raises(TypeError, 
min, 42) - raises(ValueError, min, ()) - class BadSeq: - def __getitem__(self, index): - raise ValueError - raises(ValueError, min, BadSeq()) - ''' XXX TODO: some weird bug in pypy here -- fix later - class BadNumber: - def __cmp__(self, other): - raise ValueError - raises(ValueError, min, (42, BadNumber())) - ''' - - def test_oct(self): - assert oct(100) == '0144' - assert oct(100L) == '0144L' - assert oct(-100) in ('037777777634', '01777777777777777777634') - assert oct(-100L) == '-0144L' - raises(TypeError, oct, ()) - - - def test_open(self): - def write_testfile(): - # NB the first 4 lines are also used to test input and raw_input, below - fp = open(TESTFN, 'w') - try: - fp.write('1+1\n') - fp.write('1+1\n') - fp.write('The quick brown fox jumps over the lazy dog') - fp.write('.\n') - fp.write('Dear John\n') - fp.write('XXX'*100) - fp.write('YYY'*100) - finally: - fp.close() - write_testfile() - fp = open(TESTFN, 'r') - try: - assert fp.readline(4) == '1+1\n' - assert fp.readline(4) == '1+1\n' - assert fp.readline() == 'The quick brown fox jumps over the lazy dog.\n' - assert fp.readline(4) == 'Dear' - assert fp.readline(100) == ' John\n' - assert fp.read(300) == 'XXX'*100 - assert fp.read(1000) == 'YYY'*100 - finally: - fp.close() - unlink(TESTFN) - - def test_ord(self): - assert ord(' ') == 32 - assert ord('A') == 65 - assert ord('a') == 97 - raises(TypeError, ord, 42) - if have_unicode: - assert ord(unichr(sys.maxunicode)) == sys.maxunicode - raises(TypeError, ord, unicode("12")) - - def test_pow(self): - assert pow(0,0) == 1 - assert pow(0,1) == 0 - assert pow(1,0) == 1 - assert pow(1,1) == 1 - - assert pow(2,0) == 1 - assert pow(2,10) == 1024 - assert pow(2,20) == 1024*1024 - assert pow(2,30) == 1024*1024*1024 - - assert pow(-2,0) == 1 - assert pow(-2,1) == -2 - assert pow(-2,2) == 4 - assert pow(-2,3) == -8 - - assert pow(0L,0) == 1 - assert pow(0L,1) == 0 - assert pow(1L,0) == 1 - assert pow(1L,1) == 1 - - assert pow(2L,0) == 1 - assert pow(2L,10) == 
1024 - assert pow(2L,20) == 1024*1024 - assert pow(2L,30) == 1024*1024*1024 - - assert pow(-2L,0) == 1 - assert pow(-2L,1) == -2 - assert pow(-2L,2) == 4 - assert pow(-2L,3) == -8 - - assert round(pow(0.,0) - 1., 7) == 0 - assert round(pow(0.,1) - 0., 7) == 0 - assert round(pow(1.,0) - 1., 7) == 0 - assert round(pow(1.,1) - 1., 7) == 0 - - assert round(pow(2.,0) - 1., 7) == 0 - assert round(pow(2.,10) - 1024., 7) == 0 - assert round(pow(2.,20) - 1024.*1024., 7) == 0 - assert round(pow(2.,30) - 1024.*1024.*1024., 7) == 0 - - assert round(pow(-2.,0) - 1., 7) == 0 - assert round(pow(-2.,1) - -2., 7) == 0 - assert round(pow(-2.,2) - 4., 7) == 0 - assert round(pow(-2.,3) - -8., 7) == 0 - - for x in 2, 2L, 2.0: - for y in 10, 10L, 10.0: - for z in 1000, 1000L, 1000.0: - if isinstance(x, float) or \ - isinstance(y, float) or \ - isinstance(z, float): - raises(TypeError, pow, x, y, z) - else: - assert round(pow(x, y, z) - 24.0, 7) == 0 - - raises(TypeError, pow, -1, -2, 3) - raises(ValueError, pow, 1, 2, 0) - raises(TypeError, pow, -1L, -2L, 3L) - raises(ValueError, pow, 1L, 2L, 0L) - raises(ValueError, pow, -342.43, 0.234) - - raises(TypeError, pow) - - def test_range(self): - assert range(3) == [0, 1, 2] - assert range(1, 5) == [1, 2, 3, 4] - assert range(0) == [] - assert range(-3) == [] - assert range(1, 10, 3) == [1, 4, 7] - assert range(5, -5, -3) == [5, 2, -1, -4] - - # Now test range() with longs - assert range(-2**100) == [] - assert range(0, -2**100) == [] - assert range(0, 2**100, -1) == [] - assert range(0, 2**100, -1) == [] - - a = long(10 * sys.maxint) - b = long(100 * sys.maxint) - c = long(50 * sys.maxint) - - assert range(a, a+2) == [a, a+1] - assert range(a+2, a, -1L) == [a+2, a+1] - assert range(a+4, a, -2) == [a+4, a+2] - - seq = range(a, b, c) - assert a in seq - assert b not in seq - assert len(seq) == 2 - - seq = range(b, a, -c) - assert b in seq - assert a not in seq - assert len(seq) == 2 - - seq = range(-a, -b, -c) - assert -a in seq - assert -b 
not in seq - assert len(seq) == 2 - - raises(TypeError, range) - raises(TypeError, range, 1, 2, 3, 4) - raises(ValueError, range, 1, 2, 0) - - # Reject floats when it would require PyLongs to represent. - # (smaller floats still accepted, but deprecated) - raises(TypeError, range, 1e100, 1e101, 1e101) - - raises(TypeError, range, 0, "spam") - raises(TypeError, range, 0, 42, "spam") - - raises(OverflowError, range, -sys.maxint, sys.maxint) - raises(OverflowError, range, 0, 2*sys.maxint) - - ''' XXX TODO: input and raw_input not supported yet - def test_input_and_raw_input(self): - self.write_testfile() - fp = open(TESTFN, 'r') - savestdin = sys.stdin - savestdout = sys.stdout # Eats the echo - try: - sys.stdin = fp - sys.stdout = BitBucket() - assert input() == 2 - assert input('testing\n') == 2 - assert raw_input() == 'The quick brown fox jumps over the lazy dog.' - assert raw_input('testing\n') == 'Dear John' - sys.stdin = cStringIO.StringIO("NULL\0") - raises(TypeError, input, 42, 42) - sys.stdin = cStringIO.StringIO(" 'whitespace'") - assert input() == 'whitespace' - sys.stdin = cStringIO.StringIO() - raises(EOFError, input) - del sys.stdout - raises(RuntimeError, input, 'prompt') - del sys.stdin - raises(RuntimeError, input, 'prompt') - finally: - sys.stdin = savestdin - sys.stdout = savestdout - fp.close() - unlink(TESTFN) - ''' - - def test_reduce(self): - assert reduce(lambda x, y: x+y, ['a', 'b', 'c'], '') == 'abc' - assert ( - reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], [])) == ( - ['a','c','d','w'] - ) - assert reduce(lambda x, y: x*y, range(2,8), 1) == 5040 - assert ( - reduce(lambda x, y: x*y, range(2,21), 1L)) == ( - 2432902008176640000L - ) - assert reduce(lambda x, y: x+y, Squares(10)) == 285 - assert reduce(lambda x, y: x+y, Squares(10), 0) == 285 - assert reduce(lambda x, y: x+y, Squares(0), 0) == 0 - raises(TypeError, reduce) - raises(TypeError, reduce, 42, 42) - raises(TypeError, reduce, 42, 42, 42) - assert reduce(42, "1") == "1" # 
func is never called with one item - assert reduce(42, "", "1") == "1" # func is never called with one item - raises(TypeError, reduce, 42, (42, 42)) - - class BadSeq: - def __getitem__(self, index): - raise ValueError - raises(ValueError, reduce, 42, BadSeq()) - - ''' XXX TODO: we don't have reload yet - def test_reload(self): - import sys - reload(sys) - import string - reload(string) - ## import sys - ## raises(ImportError, reload, sys) - ''' - - def test_repr(self): - assert repr('') == '\'\'' - assert repr(0) == '0' - assert repr(0L) == '0L' - assert repr(()) == '()' - assert repr([]) == '[]' - assert repr({}) == '{}' - ''' XXX TODO: we don't yet support "circular" objects! - a = [] - a.append(a) - assert repr(a) == '[[...]]' - a = {} - a[0] = a - assert repr(a) == '{0: {...}}' - ''' - - def test_round(self): - assert round(0.0) == 0.0 - assert round(1.0) == 1.0 - assert round(10.0) == 10.0 - assert round(1000000000.0) == 1000000000.0 - assert round(1e20) == 1e20 - - assert round(-1.0) == -1.0 - assert round(-10.0) == -10.0 - assert round(-1000000000.0) == -1000000000.0 - assert round(-1e20) == -1e20 - - assert round(0.1) == 0.0 - assert round(1.1) == 1.0 - assert round(10.1) == 10.0 - assert round(1000000000.1) == 1000000000.0 - - assert round(-1.1) == -1.0 - assert round(-10.1) == -10.0 - assert round(-1000000000.1) == -1000000000.0 - - assert round(0.9) == 1.0 - assert round(9.9) == 10.0 - assert round(999999999.9) == 1000000000.0 - - assert round(-0.9) == -1.0 - assert round(-9.9) == -10.0 - assert round(-999999999.9) == -1000000000.0 - - assert round(-8.0, -1) == -10.0 - - raises(TypeError, round) - - def test_setattr(self): - setattr(sys, 'spam', 1) - assert sys.spam == 1 - raises(TypeError, setattr, sys, 1, 'spam') - raises(TypeError, setattr) - - def test_str(self): - assert str('') == '' - assert str(0) == '0' - assert str(0L) == '0' - assert str(()) == '()' - assert str([]) == '[]' - assert str({}) == '{}' - ''' XXX TODO: we don't yet support 
"circular" objects! - a = [] - a.append(a) - assert str(a) == '[[...]]' - a = {} - a[0] = a - assert str(a) == '{0: {...}}' - ''' - - def test_sum(self): - assert sum([]) == 0 - assert sum(range(2,8)) == 27 - assert sum(iter(range(2,8))) == 27 - assert sum(Squares(10)) == 285 - assert sum(iter(Squares(10))) == 285 - assert sum([[1], [2], [3]], []) == [1, 2, 3] - - raises(TypeError, sum) - raises(TypeError, sum, 42) - raises(TypeError, sum, ['a', 'b', 'c']) - raises(TypeError, sum, ['a', 'b', 'c'], '') - raises(TypeError, sum, [[1], [2], [3]]) - raises(TypeError, sum, [{2:3}]) - raises(TypeError, sum, [{2:3}]*2, {2:3}) - - class BadSeq: - def __getitem__(self, index): - raise ValueError - raises(ValueError, sum, BadSeq()) - - def test_tuple(self): - assert tuple(()) == () - t0_3 = (0, 1, 2, 3) - t0_3_bis = tuple(t0_3) - ''' XXX TODO: tuples are immutable -- returns same object in CPython ''' - #self.assert_(t0_3 is t0_3_bis) - assert t0_3 == t0_3_bis - assert tuple([]) == () - assert tuple([0, 1, 2, 3]) == (0, 1, 2, 3) - assert tuple('') == () - assert tuple('spam') == ('s', 'p', 'a', 'm') - - def test_type(self): - assert type('') == type('123') - assert type('') != type(()) - - def test_unichr(self): - if have_unicode: - assert unichr(32) == unicode(' ') - assert unichr(65) == unicode('A') - assert unichr(97) == unicode('a') - assert ( - unichr(sys.maxunicode)) == ( - unicode('\\U%08x' % (sys.maxunicode), 'unicode-escape') - ) - raises(ValueError, unichr, sys.maxunicode+1) - raises(TypeError, unichr) - - def test_vars(self): - def get_vars_f0(): - return vars() - def get_vars_f2(): - get_vars_f0() - a = 1 - b = 2 - return vars() - assert Set(vars()) == Set(dir()) - import sys - assert Set(vars(sys)) == Set(dir(sys)) - assert get_vars_f0() == {} - assert get_vars_f2() == {'a': 1, 'b': 2} - raises(TypeError, vars, 42, 42) - raises(TypeError, vars, 42) - - def test_zip(self): - a = (1, 2, 3) - b = (4, 5, 6) - t = [(1, 4), (2, 5), (3, 6)] - assert zip(a, b) == t - b = 
[4, 5, 6] - assert zip(a, b) == t - b = (4, 5, 6, 7) - assert zip(a, b) == t - class I: - def __getitem__(self, i): - if i < 0 or i > 2: raise IndexError - return i + 4 - assert zip(a, I()) == t - raises(TypeError, zip) - raises(TypeError, zip, None) - class G: - pass - raises(TypeError, zip, a, G()) - - # Make sure zip doesn't try to allocate a billion elements for the - # result list when one of its arguments doesn't say how long it is. - # A MemoryError is the most likely failure mode. - class SequenceWithoutALength: - def __getitem__(self, i): - if i == 5: - raise IndexError - else: - return i - s = SequenceWithoutALength() - assert ( - zip(s, xrange(2**30))) == ( - [(x,x) for x in s] - ) - - class BadSeq: - def __getitem__(self, i): - if i == 5: - raise ValueError - else: - return i - raises(ValueError, zip, BadSeq(), BadSeq()) diff --git a/pypy/appspace/test/conftest.py b/pypy/appspace/test/conftest.py deleted file mode 100644 index c574f07c83..0000000000 --- a/pypy/appspace/test/conftest.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import generators - -import py -from pypy.conftest import getobjspace, PyPyItem -from pypy.interpreter.main import run_string - -class Directory(py.test.collect.Directory): - def __iter__(self): - for path in self.fspath.listdir(): - if path.check(fnmatch='cpy_test_*.py', file=1): - continue - #XXX yield RunFileAtAppLevelItem(py.path.extpy(path)) - elif self.fil(path): - if path.basename in ('test_cmathmodule.py', - 'builtin_functions_test.py', - 'test_complexobject.py',): - continue - yield self.Module(path) - elif self.rec(path): - yield self.Directory(path) - -class RunFileAtAppLevelItem(PyPyItem): - def run(self, driver): - space = getobjspace() - source = self.extpy.root.read() - #self.execute_appex(space, run_string, source, str(self.extpy.root), space) - run_string(source, str(self.extpy.root), space) diff --git a/pypy/appspace/test/cpy_test_types.py b/pypy/appspace/test/cpy_test_types.py deleted file mode 100644 
index 9272ae6d23..0000000000 --- a/pypy/appspace/test/cpy_test_types.py +++ /dev/null @@ -1,787 +0,0 @@ -# Python test set -- part 6, built-in types -# Slightly edited version for PyPy. - -from support_tests import * - -print '6. Built-in types' - -print '6.1 Truth value testing' -if None: raise TestFailed, 'None is true instead of false' -if 0: raise TestFailed, '0 is true instead of false' -if 0L: raise TestFailed, '0L is true instead of false' -if 0.0: raise TestFailed, '0.0 is true instead of false' -if '': raise TestFailed, '\'\' is true instead of false' -if (): raise TestFailed, '() is true instead of false' -if []: raise TestFailed, '[] is true instead of false' -if {}: raise TestFailed, '{} is true instead of false' -if not 1: raise TestFailed, '1 is false instead of true' -if not 1L: raise TestFailed, '1L is false instead of true' -if not 1.0: raise TestFailed, '1.0 is false instead of true' -if not 'x': raise TestFailed, '\'x\' is false instead of true' -if not (1, 1): raise TestFailed, '(1, 1) is false instead of true' -if not [1]: raise TestFailed, '[1] is false instead of true' -if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true' -def f(): pass -class C: pass -import sys -x = C() -if not f: raise TestFailed, 'f is false instead of true' -if not C: raise TestFailed, 'C is false instead of true' -if not sys: raise TestFailed, 'sys is false instead of true' -if not x: raise TestFailed, 'x is false instead of true' - -print '6.2 Boolean operations' -if 0 or 0: raise TestFailed, '0 or 0 is true instead of false' -if 1 and 1: pass -else: raise TestFailed, '1 and 1 is false instead of false' -if not 1: raise TestFailed, 'not 1 is true instead of false' - -print '6.3 Comparisons' -if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass -else: raise TestFailed, 'int comparisons failed' -if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass -else: raise TestFailed, 'long int comparisons failed' -if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass -else: raise 
TestFailed, 'float comparisons failed' -if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass -else: raise TestFailed, 'string comparisons failed' -if 0 in [0] and 0 not in [1]: pass -else: raise TestFailed, 'membership test failed' -if None is None and [] is not []: pass -else: raise TestFailed, 'identity test failed' - - -print '6.3.1 Conversion errors' -try: float('') -except ValueError: pass -else: raise TestFailed, "float('') didn't raise ValueError" - -try: float('5\0') -except ValueError: pass -else: raise TestFailed, "float('5\0') didn't raise ValueError" - -print '6.3.2 Division errors' -try: 5.0 / 0.0 -except ZeroDivisionError: pass -else: raise TestFailed, "5.0 / 0.0 didn't raise ZeroDivisionError" - -try: 5.0 // 0.0 -except ZeroDivisionError: pass -else: raise TestFailed, "5.0 // 0.0 didn't raise ZeroDivisionError" - -try: 5.0 % 0.0 -except ZeroDivisionError: pass -else: raise TestFailed, "5.0 % 0.0 didn't raise ZeroDivisionError" - -try: 5L / 0 -except ZeroDivisionError: pass -else: raise TestFailed, "5L / 0 didn't raise ZeroDivisionError" - -try: 5 / 0L -except ZeroDivisionError: pass -else: raise TestFailed, "5 / 0L didn't raise ZeroDivisionError" - -try: 5 // 0L -except ZeroDivisionError: pass -else: raise TestFailed, "5 // 0L didn't raise ZeroDivisionError" - -try: 5 % 0L -except ZeroDivisionError: pass -else: raise TestFailed, "5 % 0L didn't raise ZeroDivisionError" - -print '6.4 Numeric types (mostly conversions)' -if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons' -if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons' -if -1 != -1L or -1 != -1.0 or -1L != -1.0: - raise TestFailed, 'int/long/float value not equal' -# calling built-in types without argument must return 0 -if int() != 0: raise TestFailed, 'int() does not return 0' -if long() != 0L: raise TestFailed, 'long() does not return 0L' -if float() != 0.0: raise TestFailed, 'float() does not return 0.0' -if int(1.9) == 1 == int(1.1) and int(-1.1) 
== -1 == int(-1.9): pass -else: raise TestFailed, 'int() does not round properly' -if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass -else: raise TestFailed, 'long() does not round properly' -if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass -else: raise TestFailed, 'float() does not work properly' -print '6.4.1 32-bit integers' -if 12 + 24 != 36: raise TestFailed, 'int op' -if 12 + (-24) != -12: raise TestFailed, 'int op' -if (-12) + 24 != 12: raise TestFailed, 'int op' -if (-12) + (-24) != -36: raise TestFailed, 'int op' -if not 12 < 24: raise TestFailed, 'int op' -if not -24 < -12: raise TestFailed, 'int op' -# Test for a particular bug in integer multiply -xsize, ysize, zsize = 238, 356, 4 -if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912): - raise TestFailed, 'int mul commutativity' -# And another. -m = -sys.maxint - 1 -for divisor in 1, 2, 4, 8, 16, 32: - j = m // divisor - prod = divisor * j - if prod != m: - raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m) - if type(prod) is not int: - raise TestFailed, ("expected type(prod) to be int, not %r" % - type(prod)) -# Check for expected * overflow to long. -for divisor in 1, 2, 4, 8, 16, 32: - j = m // divisor - 1 - prod = divisor * j - if type(prod) is not long: - raise TestFailed, ("expected type(%r) to be long, not %r" % - (prod, type(prod))) -# Check for expected * overflow to long. 
-m = sys.maxint -for divisor in 1, 2, 4, 8, 16, 32: - j = m // divisor + 1 - prod = divisor * j - if type(prod) is not long: - raise TestFailed, ("expected type(%r) to be long, not %r" % - (prod, type(prod))) - -print '6.4.2 Long integers' -if 12L + 24L != 36L: raise TestFailed, 'long op' -if 12L + (-24L) != -12L: raise TestFailed, 'long op' -if (-12L) + 24L != 12L: raise TestFailed, 'long op' -if (-12L) + (-24L) != -36L: raise TestFailed, 'long op' -if not 12L < 24L: raise TestFailed, 'long op' -if not -24L < -12L: raise TestFailed, 'long op' -x = sys.maxint -if int(long(x)) != x: raise TestFailed, 'long op' -try: y = int(long(x)+1L) -except OverflowError: raise TestFailed, 'long op' -if not isinstance(y, long): raise TestFailed, 'long op' -x = -x -if int(long(x)) != x: raise TestFailed, 'long op' -x = x-1 -if int(long(x)) != x: raise TestFailed, 'long op' -try: y = int(long(x)-1L) -except OverflowError: raise TestFailed, 'long op' -if not isinstance(y, long): raise TestFailed, 'long op' - -try: 5 << -5 -except ValueError: pass -else: raise TestFailed, 'int negative shift <<' - -try: 5L << -5L -except ValueError: pass -else: raise TestFailed, 'long negative shift <<' - -try: 5 >> -5 -except ValueError: pass -else: raise TestFailed, 'int negative shift >>' - -try: 5L >> -5L -except ValueError: pass -else: raise TestFailed, 'long negative shift >>' - -print '6.4.3 Floating point numbers' -if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op' -if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op' -if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op' -if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op' -if not 12.0 < 24.0: raise TestFailed, 'float op' -if not -24.0 < -12.0: raise TestFailed, 'float op' - -print '6.5 Sequence types' - -print '6.5.1 Strings' -if len('') != 0: raise TestFailed, 'len(\'\')' -if len('a') != 1: raise TestFailed, 'len(\'a\')' -if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')' -if 'xyz' + 'abcde' != 'xyzabcde': raise 
TestFailed, 'string concatenation' -if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3' -if 0*'abcde' != '': raise TestFailed, 'string repetition 0*' -if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string' -if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass -else: raise TestFailed, 'in/not in string' -x = 'x'*103 -if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug' - -#extended slices for strings -a = '0123456789' -vereq(a[::], a) -vereq(a[::2], '02468') -vereq(a[1::2], '13579') -vereq(a[::-1],'9876543210') -vereq(a[::-2], '97531') -vereq(a[3::-2], '31') -vereq(a[-100:100:], a) -vereq(a[100:-100:-1], a[::-1]) -vereq(a[-100L:100L:2L], '02468') - -if have_unicode: - a = unicode('0123456789', 'ascii') - vereq(a[::], a) - vereq(a[::2], unicode('02468', 'ascii')) - vereq(a[1::2], unicode('13579', 'ascii')) - vereq(a[::-1], unicode('9876543210', 'ascii')) - vereq(a[::-2], unicode('97531', 'ascii')) - vereq(a[3::-2], unicode('31', 'ascii')) - vereq(a[-100:100:], a) - vereq(a[100:-100:-1], a[::-1]) - vereq(a[-100L:100L:2L], unicode('02468', 'ascii')) - - -print '6.5.2 Tuples' -# calling built-in types without argument must return empty -if tuple() != (): raise TestFailed,'tuple() does not return ()' -if len(()) != 0: raise TestFailed, 'len(())' -if len((1,)) != 1: raise TestFailed, 'len((1,))' -if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))' -if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation' -if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3' -if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*' -if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple' -if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass -else: raise TestFailed, 'in/not in tuple' -try: ()[0] -except IndexError: pass -else: raise TestFailed, "tuple index error didn't raise IndexError" -x = () -x += () -if x != (): raise 
TestFailed, 'tuple inplace add from () to () failed' -x += (1,) -if x != (1,): raise TestFailed, 'tuple resize from () failed' - -# extended slicing - subscript only for tuples -a = (0,1,2,3,4) -vereq(a[::], a) -vereq(a[::2], (0,2,4)) -vereq(a[1::2], (1,3)) -vereq(a[::-1], (4,3,2,1,0)) -vereq(a[::-2], (4,2,0)) -vereq(a[3::-2], (3,1)) -vereq(a[-100:100:], a) -vereq(a[100:-100:-1], a[::-1]) -vereq(a[-100L:100L:2L], (0,2,4)) - -# Check that a specific bug in _PyTuple_Resize() is squashed. -def f(): - for i in range(1000): - yield i -vereq(list(tuple(f())), range(1000)) - -# Verify that __getitem__ overrides are not recognized by __iter__ -# XXX TODO: this fails with PyPy because overriding __getitem__ will -# really override what the sequence iterator returns -#class T(tuple): -# def __getitem__(self, key): -# return str(key) + '!!!' -#vereq(iter(T((1,2))).next(), 1) - -print '6.5.3 Lists' -# calling built-in types without argument must return empty -if list() != []: raise TestFailed,'list() does not return []' -if len([]) != 0: raise TestFailed, 'len([])' -if len([1,]) != 1: raise TestFailed, 'len([1,])' -if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])' -if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation' -if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3' -if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L' -if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*' -if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*' -if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list' -if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass -else: raise TestFailed, 'in/not in list' -a = [1, 2, 3, 4, 5] -a[:-1] = a -if a != [1, 2, 3, 4, 5, 5]: - raise TestFailed, "list self-slice-assign (head)" -a = [1, 2, 3, 4, 5] -a[1:] = a -if a != [1, 1, 2, 3, 4, 5]: - raise TestFailed, "list self-slice-assign (tail)" -a = [1, 2, 3, 4, 5] -a[1:-1] = a -if a != [1, 1, 2, 3, 4, 
5, 5]: - raise TestFailed, "list self-slice-assign (center)" -try: [][0] -except IndexError: pass -else: raise TestFailed, "list index error didn't raise IndexError" -try: [][0] = 5 -except IndexError: pass -else: raise TestFailed, "list assignment index error didn't raise IndexError" -try: [].pop() -except IndexError: pass -else: raise TestFailed, "empty list.pop() didn't raise IndexError" -try: [1].pop(5) -except IndexError: pass -else: raise TestFailed, "[1].pop(5) didn't raise IndexError" -try: [][0:1] = 5 -except TypeError: pass -else: raise TestFailed, "bad list slice assignment didn't raise TypeError" -try: [].extend(None) -except TypeError: pass -else: raise TestFailed, "list.extend(None) didn't raise TypeError" -a = [1, 2, 3, 4] -a *= 0 -if a != []: - raise TestFailed, "list inplace repeat" - -a = [] -a[:] = tuple(range(10)) -if a != range(10): - raise TestFailed, "assigning tuple to slice" - -print '6.5.3a Additional list operations' -a = [0,1,2,3,4] -a[0L] = 1 -a[1L] = 2 -a[2L] = 3 -if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]' -a[0] = 5 -a[1] = 6 -a[2] = 7 -if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]' -a[-2L] = 88 -a[-1L] = 99 -if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]' -a[-2] = 8 -a[-1] = 9 -if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]' -a[:2] = [0,4] -a[-3:] = [] -a[1:1] = [1,2,3] -if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment' -a[ 1L : 4L] = [7,8,9] -if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints' -del a[1:4] -if a != [0,4]: raise TestFailed, 'list slice deletion' -del a[0] -if a != [4]: raise TestFailed, 'list item deletion [0]' -del a[-1] -if a != []: raise TestFailed, 'list item deletion [-1]' -a=range(0,5) -del a[1L:4L] -if a != [0,4]: raise TestFailed, 'list slice deletion' -del a[0L] -if a != [4]: raise TestFailed, 'list item deletion [0]' -del a[-1L] -if a != []: raise 
TestFailed, 'list item deletion [-1]' -a=[] -a.append(0) -a.append(1) -a.append(2) -if a != [0,1,2]: raise TestFailed, 'list append' -a.insert(0, -2) -a.insert(1, -1) -a.insert(2,0) -if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert' -b = a[:] -b.insert(-2, "foo") -b.insert(-200, "left") -b.insert(200, "right") -if b != ["left",-2,-1,0,0,"foo",1,2,"right"]: raise TestFailed, 'list insert2' -# a = [-2,-1,0,0,1,2] -if a.count(0) != 2: raise TestFailed, ' list count' -if a.index(0) != 2: raise TestFailed, 'list index' -if a.index(0,2) != 2: raise TestFailed, 'list index, start argument' -if a.index(0,-4) != 2: raise TestFailed, 'list index, -start argument' -if a.index(-2,-10) != 0: raise TestFailed, 'list index, very -start argument' -if a.index(0,3) != 3: raise TestFailed, 'list index, start argument' -if a.index(0,-3) != 3: raise TestFailed, 'list index, -start argument' -if a.index(0,3,4) != 3: raise TestFailed, 'list index, stop argument' -if a.index(0,-3,-2) != 3: raise TestFailed, 'list index, -stop argument' -if a.index(0,-4*sys.maxint,4*sys.maxint) != 2: - raise TestFailed, 'list index, -maxint, maxint argument' -try: - a.index(0, 4*sys.maxint,-4*sys.maxint) -except ValueError: - pass -else: - raise TestFailed, 'list index, maxint,-maxint argument' - -try: - a.index(2,0,-10) -except ValueError: - pass -else: - raise TestFailed, 'list index, very -stop argument' -a.remove(0) -try: - a.index(2,0,4) -except ValueError: - pass -else: - raise TestFailed, 'list index, stop argument.' 
-if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove' -a.reverse() -if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse' -a.sort() -if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort' -def revcmp(a, b): return cmp(b, a) -a.sort(revcmp) -if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func' -# The following dumps core in unpatched Python 1.5: -def myComparison(x,y): - return cmp(x%3, y%7) -z = range(12) -z.sort(myComparison) - -try: z.sort(2) -except TypeError: pass -else: raise TestFailed, 'list sort compare function is not callable' - -''' XXX TODO: add detection of list modification during sort -def selfmodifyingComparison(x,y): - z.append(1) - return cmp(x, y) -try: z.sort(selfmodifyingComparison) -except ValueError: pass -else: raise TestFailed, 'modifying list during sort' -''' - -try: z.sort(lambda x, y: 's') -except TypeError: pass -else: raise TestFailed, 'list sort compare function does not return int' - -# Test extreme cases with long ints -a = [0,1,2,3,4] -if a[ -pow(2,128L): 3 ] != [0,1,2]: - raise TestFailed, "list slicing with too-small long integer" -if a[ 3: pow(2,145L) ] != [3,4]: - raise TestFailed, "list slicing with too-large long integer" - -# extended slicing - -# subscript -a = [0,1,2,3,4] -vereq(a[::], a) -vereq(a[::2], [0,2,4]) -vereq(a[1::2], [1,3]) -vereq(a[::-1], [4,3,2,1,0]) -vereq(a[::-2], [4,2,0]) -vereq(a[3::-2], [3,1]) -vereq(a[-100:100:], a) -vereq(a[100:-100:-1], a[::-1]) -vereq(a[-100L:100L:2L], [0,2,4]) -vereq(a[1000:2000:2], []) -vereq(a[-1000:-2000:-2], []) -# deletion -del a[::2] -vereq(a, [1,3]) -a = range(5) -del a[1::2] -vereq(a, [0,2,4]) -a = range(5) -del a[1::-2] -vereq(a, [0,2,3,4]) -a = range(10) -del a[::1000] -vereq(a, [1, 2, 3, 4, 5, 6, 7, 8, 9]) -# assignment -a = range(10) -a[::2] = [-1]*5 -vereq(a, [-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]) -a = range(10) -a[::-4] = [10]*3 -vereq(a, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]) -a = range(4) -a[::-1] = a -vereq(a, [3, 2, 1, 0]) -a = range(10) -b = a[:] -c = 
a[:] -a[2:3] = ["two", "elements"] -b[slice(2,3)] = ["two", "elements"] -c[2:3:] = ["two", "elements"] -vereq(a, b) -vereq(a, c) -a = range(10) -a[::2] = tuple(range(5)) -vereq(a, [0, 1, 1, 3, 2, 5, 3, 7, 4, 9]) - -# Verify that __getitem__ overrides are not recognized by __iter__ -# XXX TODO same as class T(tuple) above -#class L(list): -# def __getitem__(self, key): -# return str(key) + '!!!' -#vereq(iter(L([1,2])).next(), 1) - - -print '6.6 Mappings == Dictionaries' -# calling built-in types without argument must return empty -if dict() != {}: raise TestFailed,'dict() does not return {}' -d = {} -if d.keys() != []: raise TestFailed, '{}.keys()' -if d.values() != []: raise TestFailed, '{}.values()' -if d.items() != []: raise TestFailed, '{}.items()' -if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')' -if ('a' in d) != 0: raise TestFailed, "'a' in {}" -if ('a' not in d) != 1: raise TestFailed, "'a' not in {}" -if len(d) != 0: raise TestFailed, 'len({})' -d = {'a': 1, 'b': 2} -if len(d) != 2: raise TestFailed, 'len(dict)' -k = d.keys() -k.sort() -if k != ['a', 'b']: raise TestFailed, 'dict keys()' -if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass -else: raise TestFailed, 'dict keys()' -if 'a' in d and 'b' in d and 'c' not in d: pass -else: raise TestFailed, 'dict keys() # in/not in version' -if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item' -d['c'] = 3 -d['a'] = 4 -if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment' -del d['b'] -if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion' -print '6.6.1 dict methods' -# dict.clear() -d = {1:1, 2:2, 3:3} -d.clear() -if d != {}: raise TestFailed, 'dict clear' -# dict.update() -d.update({1:100}) -d.update({2:20}) -d.update({1:1, 2:2, 3:3}) -if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update' -d.clear() -try: d.update(None) -except AttributeError: pass -else: raise TestFailed, 'dict.update(None), AttributeError expected' -print '6.6.2 user-dict methods' 
-class SimpleUserDict: - def __init__(self): - self.d = {1:1, 2:2, 3:3} - def keys(self): - return self.d.keys() - def __getitem__(self, i): - return self.d[i] -d.update(SimpleUserDict()) -if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict.update(instance)' -d.clear() -class FailingUserDict: - def keys(self): - raise ValueError -try: d.update(FailingUserDict()) -except ValueError: pass -else: raise TestFailed, 'dict.keys() expected ValueError' -class FailingUserDict: - def keys(self): - class BogonIter: - def __iter__(self): - raise ValueError - return BogonIter() -try: d.update(FailingUserDict()) -except ValueError: pass -else: raise TestFailed, 'iter(dict.keys()) expected ValueError' -class FailingUserDict: - def keys(self): - class BogonIter: - def __init__(self): - self.i = 1 - def __iter__(self): - return self - def next(self): - if self.i: - self.i = 0 - return 'a' - raise ValueError - return BogonIter() - def __getitem__(self, key): - return key -try: d.update(FailingUserDict()) -except ValueError: pass -else: raise TestFailed, 'iter(dict.keys()).next() expected ValueError' -class FailingUserDict: - def keys(self): - class BogonIter: - def __init__(self): - self.i = ord('a') - def __iter__(self): - return self - def next(self): - if self.i <= ord('z'): - rtn = chr(self.i) - self.i += 1 - return rtn - raise StopIteration - return BogonIter() - def __getitem__(self, key): - raise ValueError -try: d.update(FailingUserDict()) -except ValueError: pass -else: raise TestFailed, 'dict.update(), __getitem__ expected ValueError' -print '6.6.3 dict.fromkeys' -# dict.fromkeys() -if dict.fromkeys('abc') != {'a':None, 'b':None, 'c':None}: - raise TestFailed, 'dict.fromkeys did not work as a class method' -d = {} -if d.fromkeys('abc') is d: - raise TestFailed, 'dict.fromkeys did not return a new dict' -if d.fromkeys('abc') != {'a':None, 'b':None, 'c':None}: - raise TestFailed, 'dict.fromkeys failed with default value' -if d.fromkeys((4,5),0) != {4:0, 5:0}: - raise 
TestFailed, 'dict.fromkeys failed with specified value' -if d.fromkeys([]) != {}: - raise TestFailed, 'dict.fromkeys failed with null sequence' -def g(): - yield 1 -if d.fromkeys(g()) != {1:None}: - raise TestFailed, 'dict.fromkeys failed with a generator' -try: {}.fromkeys(3) -except TypeError: pass -else: raise TestFailed, 'dict.fromkeys failed to raise TypeError' -class dictlike(dict): pass -if dictlike.fromkeys('a') != {'a':None}: - raise TestFailed, 'dictsubclass.fromkeys did not inherit' -if dictlike().fromkeys('a') != {'a':None}: - raise TestFailed, 'dictsubclass.fromkeys did not inherit' -if type(dictlike.fromkeys('a')) is not dictlike: - raise TestFailed, 'dictsubclass.fromkeys created wrong type' -if type(dictlike().fromkeys('a')) is not dictlike: - raise TestFailed, 'dictsubclass.fromkeys created wrong type' - -from UserDict import UserDict -class mydict(dict): - def __new__(cls): - return UserDict() -ud = mydict.fromkeys('ab') -if ud != {'a':None, 'b':None} or not isinstance(ud,UserDict): - raise TestFailed, 'fromkeys did not instantiate using __new__' - -print '6.6.4 dict copy, get, setdefault' - -# dict.copy() -d = {1:1, 2:2, 3:3} -if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy' -if {}.copy() != {}: raise TestFailed, 'empty dict copy' -# dict.get() -d = {} -if d.get('c') is not None: raise TestFailed, 'missing {} get, no 2nd arg' -if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg' -d = {'a' : 1, 'b' : 2} -if d.get('c') is not None: raise TestFailed, 'missing dict get, no 2nd arg' -if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg' -if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg' -if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg' -# dict.setdefault() -d = {} -if d.setdefault('key0') is not None: - raise TestFailed, 'missing {} setdefault, no 2nd arg' -if d.setdefault('key0') is not None: - raise TestFailed, 'present {} setdefault, no 2nd arg' 
-d.setdefault('key', []).append(3) -if d['key'][0] != 3: - raise TestFailed, 'missing {} setdefault, w/ 2nd arg' -d.setdefault('key', []).append(4) -if len(d['key']) != 2: - raise TestFailed, 'present {} setdefault, w/ 2nd arg' - -print '6.6.5 dict popitem' - -# dict.popitem() -for copymode in -1, +1: - # -1: b has same structure as a - # +1: b is a.copy() - for log2size in range(4):#(12): - size = 2**log2size - a = {} - b = {} - for i in range(size): - a[`i`] = i - if copymode < 0: - b[`i`] = i - if copymode > 0: - b = a.copy() - for i in range(size): - ka, va = ta = a.popitem() - if va != int(ka): raise TestFailed, "a.popitem: %s" % str(ta) - kb, vb = tb = b.popitem() - if vb != int(kb): raise TestFailed, "b.popitem: %s" % str(tb) - if copymode < 0 and ta != tb: - raise TestFailed, "a.popitem != b.popitem: %s, %s" % ( - str(ta), str(tb)) - if a: raise TestFailed, 'a not empty after popitems: %s' % str(a) - if b: raise TestFailed, 'b not empty after popitems: %s' % str(b) - -d.clear() -try: d.popitem() -except KeyError: pass -else: raise TestFailed, "{}.popitem doesn't raise KeyError" - -print '6.6.6 dict pop' - -# Tests for pop with specified key -d.clear() -k, v = 'abc', 'def' -d[k] = v -try: d.pop('ghi') -except KeyError: pass -else: raise TestFailed, "{}.pop(k) doesn't raise KeyError when k not in dictionary" - -if d.pop(k) != v: raise TestFailed, "{}.pop(k) doesn't find known key/value pair" -if len(d) > 0: raise TestFailed, "{}.pop(k) failed to remove the specified pair" - -try: d.pop(k) -except KeyError: pass -else: raise TestFailed, "{}.pop(k) doesn't raise KeyError when dictionary is empty" - -# verify longs/ints get same value when key > 32 bits (for 64-bit archs) -# see SF bug #689659 -x = 4503599627370496L -y = 4503599627370496 -h = {x: 'anything', y: 'something else'} -if h[x] != h[y]: - raise TestFailed, "long/int key should match" - -if d.pop(k, v) != v: raise TestFailed, "{}.pop(k, v) doesn't return default value" -d[k] = v -if d.pop(k, 1) != v: 
raise TestFailed, "{}.pop(k, v) doesn't find known key/value pair" - -''' TODO: doesn't raise correctly -d[1] = 1 -try: - for i in d: - d[i+1] = 1 -except RuntimeError: - pass -else: - raise TestFailed, "changing dict size during iteration doesn't raise Error" -''' - -print '6.7 type' - -try: type(1, 2) -except TypeError: pass -else: raise TestFailed, 'type(), w/2 args expected TypeError' - -try: type(1, 2, 3, 4) -except TypeError: pass -else: raise TestFailed, 'type(), w/4 args expected TypeError' - -''' TODO: No buffer support yet XXX -print '6.8 buffer' - -try: buffer('asdf', -1) -except ValueError: pass -else: raise TestFailed, "buffer('asdf', -1) should raise ValueError" - -try: buffer(None) -except TypeError: pass -else: raise TestFailed, "buffer(None) should raise TypeError" - -a = buffer('asdf') -hash(a) -b = a * 5 -if a == b: - raise TestFailed, 'buffers should not be equal' -if str(b) != ('asdf' * 5): - raise TestFailed, 'repeated buffer has wrong content' -if str(a * 0) != '': - raise TestFailed, 'repeated buffer zero times has wrong content' -if str(a + buffer('def')) != 'asdfdef': - raise TestFailed, 'concatenation of buffers yields wrong content' - -try: a[1] = 'g' -except TypeError: pass -else: raise TestFailed, "buffer assignment should raise TypeError" - -try: a[0:1] = 'g' -except TypeError: pass -else: raise TestFailed, "buffer slice assignment should raise TypeError" -''' -print '6.99999999... 
All tests ran to completion' diff --git a/pypy/appspace/test/no_test_stringmodule.py b/pypy/appspace/test/no_test_stringmodule.py deleted file mode 100644 index 9d257d7ebf..0000000000 --- a/pypy/appspace/test/no_test_stringmodule.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python - - -""" -Test module for functions in stringmodule.py - -""" - -import string as c_py_string -import unittest - -import autopath -from pypy.tool import testit -from pypy.appspace import string as pypy_string - -class TestStringmodule(unittest.TestCase): - def regression(self, sFuncname, *args, **kwargs): - try: - c_py_res = getattr(c_py_string, sFuncname)(*args, **kwargs) - except Exception, ce: - c_py_res = ce.__class__ - - try: - pypy_res = getattr(pypy_string, sFuncname)(*args, **kwargs) - except Exception, pe: - pypy_res = pe.__class__ - - self.assertEqual(c_py_res, pypy_res, 'not equal \n1:<%s>\n2:<%s>' % (c_py_res, pypy_res)) - - - def test_maketrans(self): - self.regression('maketrans','','') - self.regression('maketrans','a','b') - self.regression('maketrans','aa','bb') - self.regression('maketrans','aa','') - - -if __name__ == "__main__": - testit.main()
\ No newline at end of file diff --git a/pypy/appspace/test/support_tests.py b/pypy/appspace/test/support_tests.py deleted file mode 100644 index e051bd3083..0000000000 --- a/pypy/appspace/test/support_tests.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Supporting definitions for the Python regression tests.""" - -''' -if __name__ != 'test.test_support': - raise ImportError, 'test_support must be imported from the test package' -''' - -import sys -from os import unlink - -import py -TESTFN = str(py.test.config.tmpdir.join('@test')) - -class Error(Exception): - """Base class for regression test exceptions.""" - -class TestFailed(Error): - """Test failed.""" - -class TestSkipped(Error): - """Test skipped. - - This can be raised to indicate that a test was deliberatly - skipped, but not because a feature wasn't available. For - example, if some resource can't be used, such as the network - appears to be unavailable, this should be raised instead of - TestFailed. - """ - -class ResourceDenied(TestSkipped): - """Test skipped because it requested a disallowed resource. - - This is raised when a test calls requires() for a resource that - has not be enabled. It is used to distinguish between expected - and unexpected skips. - """ - -verbose = 0 # Flag set to 0 by regrtest.py -use_resources = None # Flag set to [] by regrtest.py - -# _original_stdout is meant to hold stdout at the time regrtest began. -# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever. -# The point is to have some flavor of stdout the user can actually see. 
-_original_stdout = None -def record_original_stdout(stdout): - global _original_stdout - _original_stdout = stdout - -def get_original_stdout(): - return _original_stdout or sys.stdout - -def unload(name): - try: - del sys.modules[name] - except KeyError: - pass - -def forget(modname): - '''"Forget" a module was ever imported by removing it from sys.modules and - deleting any .pyc and .pyo files.''' - unload(modname) - import os - for dirname in sys.path: - try: - os.unlink(os.path.join(dirname, modname + os.extsep + 'pyc')) - except os.error: - pass - # Deleting the .pyo file cannot be within the 'try' for the .pyc since - # the chance exists that there is no .pyc (and thus the 'try' statement - # is exited) but there is a .pyo file. - try: - os.unlink(os.path.join(dirname, modname + os.extsep + 'pyo')) - except os.error: - pass - -def is_resource_enabled(resource): - """Test whether a resource is enabled. Known resources are set by - regrtest.py.""" - return use_resources is not None and resource in use_resources - -def requires(resource, msg=None): - """Raise ResourceDenied if the specified resource is not available. - - If the caller's module is __main__ then automatically return True. 
The - possibility of False being returned occurs when regrtest.py is executing.""" - # see if the caller's module is __main__ - if so, treat as if - # the resource was set - if sys._getframe().f_back.f_globals.get("__name__") == "__main__": - return - if not is_resource_enabled(resource): - if msg is None: - msg = "Use of the `%s' resource not enabled" % resource - raise ResourceDenied(msg) - -FUZZ = 1e-6 - -def fcmp(x, y): # fuzzy comparison function - if type(x) == type(0.0) or type(y) == type(0.0): - try: - x, y = float(x), float(y) - fuzz = (abs(x) + abs(y)) * FUZZ - if abs(x-y) <= fuzz: - return 0 - except: - pass - elif type(x) == type(y) and type(x) in (type(()), type([])): - for i in range(min(len(x), len(y))): - outcome = fcmp(x[i], y[i]) - if outcome != 0: - return outcome - return cmp(len(x), len(y)) - return cmp(x, y) - -try: - unicode - have_unicode = 0 # XXX UNICODE 1 -except NameError: - have_unicode = 0 - -is_jython = sys.platform.startswith('java') - - - -##if fp is not None: -## fp.close() -##del fp - -def findfile(file, here=__file__): - """Try to find a file on sys.path and the working directory. If it is not - found the argument passed to the function is returned (this does not - necessarily signal failure; could still be the legitimate path).""" - import os - if os.path.isabs(file): - return file - path = sys.path - path = [os.path.dirname(here)] + path - for dn in path: - fn = os.path.join(dn, file) - if os.path.exists(fn): return fn - return file - -def verify(condition, reason='test failed'): - """Verify that condition is true. If not, raise TestFailed. - - The optional argument reason can be given to provide - a better error text. - """ - - if not condition: - raise TestFailed(reason) - -def vereq(a, b): - """Raise TestFailed if a == b is false. - - This is better than verify(a == b) because, in case of failure, the - error message incorporates repr(a) and repr(b) so you can see the - inputs. 
- - Note that "not (a == b)" isn't necessarily the same as "a != b"; the - former is tested. - """ - - if not (a == b): - raise TestFailed, "%r == %r" % (a, b) - -def sortdict(dict): - "Like repr(dict), but in sorted order." - items = dict.items() - items.sort() - reprpairs = ["%r: %r" % pair for pair in items] - withcommas = ", ".join(reprpairs) - return "{%s}" % withcommas - -def check_syntax(statement): - try: - compile(statement, '<string>', 'exec') - except SyntaxError: - pass - else: - print 'Missing SyntaxError: "%s"' % statement diff --git a/pypy/appspace/test/test_cmathmodule.py b/pypy/appspace/test/test_cmathmodule.py deleted file mode 100644 index 5667c2e694..0000000000 --- a/pypy/appspace/test/test_cmathmodule.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# taken from CPython 2.3 - -""" -Test module for functions in cmathmodule.py - -It seems the log and log10 functions are generating errors -due to numerical problems with floor() in complex.__div__. -""" - -import math -import cmath -import sys -import unittest -import autopath - -from pypy.appspace import cmathmodule -from pypy.appspace.test.test_complexobject import equal - -def enumerate(): - valueRange = [-12.34, -3, -1, -0.5, 0, 0.5, 1, 3, 12.34] - res = [] - for x0 in valueRange: - for y0 in valueRange: - z = complex(x0,y0) - res.append(z) - return res - - - -class TestCMathModule: - - def assertAEqual(self, a, b): - if not equal(a, b): - raise self.failureException, '%s ~== %s'%(a, b) - - def test_funcs(self): - "Compare many functions with CPython." 
- - for z in enumerate(): - - for op in "sqrt acos acosh asin asinh atan atanh cos cosh exp".split(): - if op == "atan" and equal(z, complex(0,-1)) or equal(z, complex(0,1)): - continue - if op == "atanh" and equal(z, complex(-1,0)) or equal(z, complex(1,0)): - continue - op0 = cmath.__dict__[op](z) - op1 = cmathmodule.__dict__[op](z) - self.assertAEqual(op0, op1) - - - def test_log_log10(self): - "Compare log/log10 functions with CPython." - - for z in enumerate(): - for op in "log log10".split(): - if z != 0: - op0 = cmath.__dict__[op](z) - op1 = cmathmodule.__dict__[op](z) - self.assertAEqual(op0, op1) - diff --git a/pypy/appspace/test/test_complexobject.py b/pypy/appspace/test/test_complexobject.py deleted file mode 100644 index 47a3705127..0000000000 --- a/pypy/appspace/test/test_complexobject.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/env python - -""" - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -Note that this test currently runs at cpython-level and not -at any application level .... - -""" -#taken from CPython 2.3 (?) - -""" -Test module for class complex in complexobject.py - -As it seems there are some numerical differences in -the __div__ and __divmod__ methods which have to be -sorted out. -""" - -import autopath - -import math -import cmath -import sys -import types -import unittest - -from pypy.tool import testit -#from pypy.appspace.complexobject import complex as pycomplex -from pypy.module.test.applevel_in_cpython import applevel_in_cpython -our_own_builtin = applevel_in_cpython('__builtin__') -pycomplex = our_own_builtin.complex - - -try: - unicode - have_unicode = 0 # pypy doesn't have unicode, we know it ... -except NameError: - have_unicode = 0 - - -def equal(a, b): - "Compare two complex or normal numbers. 0 if different, 1 if roughly equal." 
- - numTypes = [types.IntType, types.LongType, types.FloatType] - da, db = dir(a), dir(b) - - if 'real' in da and 'real' in db and 'imag' in da and 'imag' in db: - if math.fabs(a.real-b.real) > 1e-10: - return 0 - if math.fabs(a.imag-b.imag) > 1e-10: - return 0 - else: - return 1 - elif type(a) in numTypes and type(b) in numTypes: - if math.fabs(a-b) > 1e-10: - return 0 - else: - return 1 - - - - -def enumerate(): - valueRange = [-3, -0.5, 0, 1] - res = [] - for x0 in valueRange: - for y0 in valueRange: - for x1 in valueRange: - for y1 in valueRange: - z0c = complex(x0,y0) - z1c = complex(x1,y1) - z0p = pycomplex(x0,y0) - z1p = pycomplex(x1,y1) - res.append((z0c, z1c, z0p, z1p)) - - return res - - - -class TestComplex(unittest.TestCase): - - def assertAEqual(self, a, b): - if not equal(a, b): - raise self.failureException, '%s ~== %s'%(a, b) - - def test_wrongInit1(self): - "Compare wrong init. with CPython." - - try: - complex("1", "1") - except TypeError: - pass - else: - self.fail('complex("1", "1")') - - try: - pycomplex("1", "1") - except TypeError: - pass - else: - self.fail('complex("1", "1")') - - - def test_wrongInit2(self): - "Compare wrong init. with CPython." - - try: - complex(1, "1") - except TypeError: - pass - else: - self.fail('complex(1, "1")') - - try: - pycomplex(1, "1") - except TypeError: - pass - else: - self.fail('complex(1, "1")') - - - def test_wrongInitFromString(self): - "Compare string init. with CPython." - - if complex(" 3.14+J ") != 3.14+1j: - self.fail('complex(" 3.14+J )"') - if not equal(pycomplex(" 3.14+J "), pycomplex(3.14,1)): - self.fail('complex(" 3.14+J )"') - - - def test_wrongInitFromUnicodeString(self): - "Compare unicode string init. with CPython." - - if have_unicode: - if complex(unicode(" 3.14+J ")) != 3.14+1j: - self.fail('complex(u" 3.14+J )"') - if not equal(pycomplex(unicode(" 3.14+J ")), pycomplex(3.14, 1)): - self.fail('complex(u" 3.14+J )"') - - - def test_class(self): - "Compare class with CPython." 
- - class Z: - def __complex__(self): - return 3.14j - z = Z() - if complex(z) != 3.14j: - self.fail('complex(classinstance)') - - if not equal(complex(z), pycomplex(0, 3.14)): - self.fail('complex(classinstance)') - - - def test_add_sub_mul_div(self): - "Compare add/sub/mul/div with CPython." - - for (z0c, z1c, z0p, z1p) in enumerate(): - mc = z0c*z1c - mp = z0p*z1p - self.assertAEqual(mc, mp) - - sc = z0c+z1c - sp = z0p+z1p - self.assertAEqual(sc, sp) - - dc = z0c-z1c - dp = z0p-z1p - self.assertAEqual(dc, dp) - - if not equal(z1c, complex(0,0)): - qc = z0c/z1c - qp = z0p/z1p - self.assertAEqual(qc, qp) - - - def test_special(self): - "Compare special methods with CPython." - - for (x, y) in [(0,0), (0,1), (1,3.)]: - zc = complex(x, y) - zp = pycomplex(x, y) - - self.assertAEqual(zc, zp) - self.assertAEqual(-zc, -zp) - self.assertAEqual(+zc, +zp) - self.assertAEqual(abs(zc), abs(zp)) - self.assertAEqual(zc, zp) - #self.assertEqual(zc.conjugate(), zp.conjugate()) XXX - self.assertEqual(str(zc), str(zp)) - self.assertEqual(hash(zc), hash(zp)) - - - # this fails on python2.3 and is depreacted anyway - def _test_divmod(self): - "Compare divmod with CPython." - - for (z0c, z1c, z0p, z1p) in enumerate(): - mc = z0c*z1c - mp = z0p*z1p - self.assertAEqual(mc, mp) - - if not equal(z1c, complex(0,0)): - ddc, mmc = divmod(z0c, z1c) - self.assertAEqual(ddc*z1c + mmc, z0c) - ddp, mmp = divmod(z0p, z1p) - self.assertAEqual(ddp*z1p + mmp, z0p) - self.assertAEqual(ddc, ddp) - self.assertAEqual(mmc, mmp) - - - # these fail on python2.3 - def _test_mod(self): - "Compare mod with CPython." - - for (z0c, z1c, z0p, z1p) in enumerate(): - mc = z0c*z1c - mp = z0p*z1p - self.assertAEqual(mc, mp) - - if not equal(z1c, complex(0,0)): - rc = z0c%z1c - rp = z0p%z1p - self.assertAEqual(rc, rp) - - def test_div(self): - "Compare mod with CPython." 
- - for (z0c, z1c, z0p, z1p) in enumerate(): - mc = z0c*z1c - mp = z0p*z1p - self.assertAEqual(mc, mp) - - if not equal(z1c, complex(0,0)): - rc = z0c/z1c - rp = z0p/z1p - self.assertAEqual(rc, rp) - - - def test_pow(self): - "Compare pow with CPython." - - for (z0c, z1c, z0p, z1p) in enumerate(): - if not equal(z0c, 0j) and (z1c.imag != 0.0): - pc = z0c**z1c - pp = z0p**z1p - self.assertAEqual(pc, pp) - pc = z0c**z0c.real - pp = z0p**z0p.real - self.assertAEqual(pc, pp) - -if __name__ == "__main__": - testit.main() diff --git a/pypy/appspace/test/test_exceptions.py b/pypy/appspace/test/test_exceptions.py deleted file mode 100644 index a50541b7a6..0000000000 --- a/pypy/appspace/test/test_exceptions.py +++ /dev/null @@ -1,5 +0,0 @@ -import autopath - -def app_test_import(): - import exceptions - assert exceptions.SyntaxError is SyntaxError diff --git a/pypy/appspace/test/test_file.py b/pypy/appspace/test/test_file.py deleted file mode 100644 index b77d27c93c..0000000000 --- a/pypy/appspace/test/test_file.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import autopath -from pypy.appspace import _file -from pypy.tool.udir import udir -import py -import unittest - -class TestFile: - def setup_method(self, method): - filename = os.path.join(autopath.this_dir, 'test_file.py') - self.fd = _file.file_(filename, 'r') - - def teardown_method(self, method): - self.fd.close() - - def test_case_1(self): - assert self.fd.tell() == 0 - - def test_case_readonly(self): - fn = str(udir.join('temptestfile')) - f=_file.file_(fn, 'w') - assert f.name == fn - assert f.mode == 'w' - assert f.closed == False - assert f.encoding == None # Fix when we find out what this is - py.test.raises(TypeError, setattr, f, 'name', 42) diff --git a/pypy/appspace/test/test_md5.py b/pypy/appspace/test/test_md5.py deleted file mode 100644 index d20dfc9b06..0000000000 --- a/pypy/appspace/test/test_md5.py +++ /dev/null @@ -1,241 +0,0 @@ -"""A test script to compare MD5 implementations. 
- -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. -""" - -import autopath -import string, unittest -import md5 # CPython's implementation in C. -from pypy.appspace import md5 as pymd5 # The pure Python implementation. - - -# Helpers... - -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return string.join(d, " ") - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. - """ - - # Use Python's standard library MD5 compiled C module. - m1 = md5.md5() - m1.update(message) - d1 = m1.digest() - d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.md5() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() - - # Return None if equal or the different digests if not equal. - if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 - - -class MD5CompareTestCase(unittest.TestCase): - "Compare pure Python MD5 against Python's std. lib. version." - - def test1(self): - "Test cases with known digest result." 
- - cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) - try: - assert res == None - except AssertionError, details: - d1, d2 = res - message, expectedResult = cases[i][0], None - if len(cases[i]) == 2: - expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) - - - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) - try: - assert res == None - except AssertionError, details: - d1, d2 = res - message = cases[i][0] - printDiff(message, d1, d2) - - - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) - try: - assert res == None - except AssertionError, details: - d1, d2 = res - message = cases[i][0] - printDiff(message, d1, d2) - - - def test4(self): - "Test cases with increasingly growing message lengths." - - i = 0 - while i < 2**5: - message = i * 'a' - res = compareImp(message) - try: - assert res == None - except AssertionError, details: - d1, d2 = res - printDiff(message, d1, d2) - i = i + 1 - - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.md5() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... 
- for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def makeSuite(): - suite = unittest.TestSuite() - - suite.addTest(MD5CompareTestCase('test1')) - suite.addTest(MD5CompareTestCase('test2')) - suite.addTest(MD5CompareTestCase('test3')) - suite.addTest(MD5CompareTestCase('test4')) - suite.addTest(MD5CompareTestCase('test5')) - - return suite - - -if __name__ == "__main__": - unittest.TextTestRunner().run(makeSuite()) diff --git a/pypy/appspace/test/test_sha.py b/pypy/appspace/test/test_sha.py deleted file mode 100644 index 521a4bacf4..0000000000 --- a/pypy/appspace/test/test_sha.py +++ /dev/null @@ -1,25 +0,0 @@ -# Testing sha module (NIST's Secure Hash Algorithm) - -# use the three examples from Federal Information Processing Standards -# Publication 180-1, Secure Hash Standard, 1995 April 17 -# http://www.itl.nist.gov/div897/pubs/fip180-1.htm - -import autopath -from pypy.appspace import sha - -class TestSHA: - def check(self, data, digest): - computed = sha.new(data).hexdigest() - assert computed == digest - - def test_case_1(self): - self.check("abc", - "a9993e364706816aba3e25717850c26c9cd0d89d") - - def test_case_2(self): - self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", - "84983e441c3bd26ebaae4aa1f95129e5e54670f1") - - def disabled_too_slow_test_case_3(self): - self.check("a" * 1000000, - "34aa973cd4c4daa4f61eeb2bdbad27316534016f") diff --git a/pypy/appspace/test/test_sio.py b/pypy/appspace/test/test_sio.py deleted file mode 100644 index 71d5accee0..0000000000 --- a/pypy/appspace/test/test_sio.py +++ /dev/null @@ -1,732 +0,0 @@ -"""Unit tests for sio (new standard I/O).""" - -import os -import time -import tempfile - -from pypy.appspace import sio - -class TestSource(object): - - def __init__(self, packets): - for x in packets: - assert x - self.orig_packets = list(packets) - self.packets = list(packets) - self.pos = 
0 - - def tell(self): - return self.pos - - def seek(self, offset, whence=0): - if whence == 1: - offset += self.pos - elif whence == 2: - for packet in self.orig_packets: - offset += len(packet) - else: - assert whence == 0 - self.packets = list(self.orig_packets) - self.pos = 0 - while self.pos < offset: - data = self.read(offset - self.pos) - if not data: - break - assert self.pos == offset - - def read(self, n): - try: - data = self.packets.pop(0) - except IndexError: - return "" - if len(data) > n: - data, rest = data[:n], data[n:] - self.packets.insert(0, rest) - self.pos += len(data) - return data - - def close(self): - pass - -class TestReader(object): - - def __init__(self, packets): - for x in packets: - assert x - self.orig_packets = list(packets) - self.packets = list(packets) - self.pos = 0 - - def tell(self): - return self.pos - - def seek(self, offset, whence=0): - if whence == 1: - offset += self.pos - elif whence == 2: - for packet in self.orig_packets: - offset += len(packet) - else: - assert whence == 0 - self.packets = list(self.orig_packets) - self.pos = 0 - while self.pos < offset: - data = self.read(offset - self.pos) - if not data: - break - assert self.pos == offset - - def read(self, n): - try: - data = self.packets.pop(0) - except IndexError: - return "" - if len(data) > n: - data, rest = data[:n], data[n:] - self.packets.insert(0, rest) - self.pos += len(data) - return data - - def close(self): - pass - -class TestWriter(object): - - def __init__(self, data=''): - self.buf = data - self.pos = 0 - - def write(self, data): - if self.pos >= len(self.buf): - self.buf += "\0" * (self.pos - len(self.buf)) + data - self.pos = len(self.buf) - else: - self.buf = (self.buf[:self.pos] + data + - self.buf[self.pos + len(data):]) - self.pos += len(data) - - def tell(self): - return self.pos - - def seek(self, offset, whence=0): - if whence == 0: - pass - elif whence == 1: - offset += self.pos - elif whence == 2: - offset += len(self.buf) - else: - 
raise ValueError, "whence should be 0, 1 or 2" - if offset < 0: - offset = 0 - self.pos = offset - - def close(self): - pass - - def truncate(self, size=None): - if size is None: - size = self.pos - if size <= len(self.buf): - self.buf = self.buf[:size] - else: - self.buf += '\0' * (size - len(self.buf)) - -class TestReaderWriter(TestWriter): - - def read(self, n=-1): - if n < 1: - result = self.buf[self.pos: ] - self.pos = len(self.buf) - else: - if self.pos + n > len(self.buf): - n = len(self.buf) - self.pos - result = self.buf[self.pos: self.pos+n] - self.pos += n - return result - -class TestBufferingInputStreamTests: - - packets = ["a", "b", "\n", "def", "\nxy\npq\nuv", "wx"] - lines = ["ab\n", "def\n", "xy\n", "pq\n", "uvwx"] - - def makeStream(self, tell=False, seek=False, bufsize=None): - base = TestSource(self.packets) - if not tell: - base.tell = None - if not seek: - base.seek = None - return sio.BufferingInputStream(base, bufsize) - - def test_readline(self): - file = self.makeStream() - assert list(iter(file.readline, "")) == self.lines - - def test_readlines(self): - # This also tests next() and __iter__() - file = self.makeStream() - assert file.readlines() == self.lines - - def test_readlines_small_bufsize(self): - file = self.makeStream(bufsize=1) - assert list(file) == self.lines - - def test_readall(self): - file = self.makeStream() - assert file.readall() == "".join(self.lines) - - def test_readall_small_bufsize(self): - file = self.makeStream(bufsize=1) - assert file.readall() == "".join(self.lines) - - def test_readall_after_readline(self): - file = self.makeStream() - assert file.readline() == self.lines[0] - assert file.readline() == self.lines[1] - assert file.readall() == "".join(self.lines[2:]) - - def test_read_1_after_readline(self): - file = self.makeStream() - assert file.readline() == "ab\n" - assert file.readline() == "def\n" - blocks = [] - while 1: - block = file.read(1) - if not block: - break - blocks.append(block) - assert 
file.read(0) == "" - assert blocks == list("".join(self.lines)[7:]) - - def test_read_1(self): - file = self.makeStream() - blocks = [] - while 1: - block = file.read(1) - if not block: - break - blocks.append(block) - assert file.read(0) == "" - assert blocks == list("".join(self.lines)) - - def test_read_2(self): - file = self.makeStream() - blocks = [] - while 1: - block = file.read(2) - if not block: - break - blocks.append(block) - assert file.read(0) == "" - assert blocks == ["ab", "\nd", "ef", "\nx", "y\n", "pq", - "\nu", "vw", "x"] - - def test_read_4(self): - file = self.makeStream() - blocks = [] - while 1: - block = file.read(4) - if not block: - break - blocks.append(block) - assert file.read(0) == "" - assert blocks == ["ab\nd", "ef\nx", "y\npq", "\nuvw", "x"] - - def test_read_4_after_readline(self): - file = self.makeStream() - assert file.readline() == "ab\n" - assert file.readline() == "def\n" - blocks = [file.read(4)] - while 1: - block = file.read(4) - if not block: - break - blocks.append(block) - assert file.read(0) == "" - assert blocks == ["xy\np", "q\nuv", "wx"] - - def test_read_4_small_bufsize(self): - file = self.makeStream(bufsize=1) - blocks = [] - while 1: - block = file.read(4) - if not block: - break - blocks.append(block) - assert blocks == ["ab\nd", "ef\nx", "y\npq", "\nuvw", "x"] - - def test_tell_1(self): - file = self.makeStream(tell=True) - pos = 0 - while 1: - assert file.tell() == pos - n = len(file.read(1)) - if not n: - break - pos += n - - def test_tell_1_after_readline(self): - file = self.makeStream(tell=True) - pos = 0 - pos += len(file.readline()) - assert file.tell() == pos - pos += len(file.readline()) - assert file.tell() == pos - while 1: - assert file.tell() == pos - n = len(file.read(1)) - if not n: - break - pos += n - - def test_tell_2(self): - file = self.makeStream(tell=True) - pos = 0 - while 1: - assert file.tell() == pos - n = len(file.read(2)) - if not n: - break - pos += n - - def test_tell_4(self): - 
file = self.makeStream(tell=True) - pos = 0 - while 1: - assert file.tell() == pos - n = len(file.read(4)) - if not n: - break - pos += n - - def test_tell_readline(self): - file = self.makeStream(tell=True) - pos = 0 - while 1: - assert file.tell() == pos - n = len(file.readline()) - if not n: - break - pos += n - - def test_seek(self): - file = self.makeStream(tell=True, seek=True) - all = file.readall() - end = len(all) - for readto in range(0, end+1): - for seekto in range(0, end+1): - for whence in 0, 1, 2: - file.seek(0) - assert file.tell() == 0 - head = file.read(readto) - assert head == all[:readto] - if whence == 1: - offset = seekto - readto - elif whence == 2: - offset = seekto - end - else: - offset = seekto - file.seek(offset, whence) - here = file.tell() - assert here == seekto - rest = file.readall() - assert rest == all[seekto:] - - def test_seek_noseek(self): - file = self.makeStream() - all = file.readall() - end = len(all) - for readto in range(0, end+1): - for seekto in range(readto, end+1): - for whence in 1, 2: - file = self.makeStream() - head = file.read(readto) - assert head == all[:readto] - if whence == 1: - offset = seekto - readto - elif whence == 2: - offset = seekto - end - file.seek(offset, whence) - rest = file.readall() - assert rest == all[seekto:] - -class TestBufferingOutputStream: - - def test_write(self): - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.write("123") - assert base.buf == "" - assert filter.tell() == 3 - filter.write("456") - assert base.buf == "1234" - filter.write("789ABCDEF") - assert base.buf == "123456789ABC" - filter.write("0123") - assert base.buf == "123456789ABCDEF0" - assert filter.tell() == 19 - filter.close() - assert base.buf == "123456789ABCDEF0123" - - def test_write_seek(self): - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.write("x"*6) - filter.seek(3) - filter.write("y"*2) - filter.close() - assert base.buf == "x"*3 + "y"*2 + "x"*1 - - 
def test_write_seek_beyond_end(self): - "Linux behaviour. May be different on other platforms." - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.seek(3) - filter.write("y"*2) - filter.close() - assert base.buf == "\0"*3 + "y"*2 - - def test_truncate(self): - "Linux behaviour. May be different on other platforms." - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.write('x') - filter.truncate(4) - filter.write('y') - filter.close() - assert base.buf == 'xy' + '\0' * 2 - - def test_truncate2(self): - "Linux behaviour. May be different on other platforms." - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.write('12345678') - filter.truncate(4) - filter.write('y') - filter.close() - assert base.buf == '1234' + '\0' * 4 + 'y' - -class TestLineBufferingOutputStreamTests: - - def test_write(self): - base = TestWriter() - filter = sio.LineBufferingOutputStream(base) - filter.bufsize = 4 # More handy for testing than the default - filter.write("123") - assert base.buf == "" - assert filter.tell() == 3 - filter.write("456") - assert base.buf == "1234" - filter.write("789ABCDEF\n") - assert base.buf == "123456789ABCDEF\n" - filter.write("0123") - assert base.buf == "123456789ABCDEF\n0123" - assert filter.tell() == 20 - filter.close() - assert base.buf == "123456789ABCDEF\n0123" - - def xtest_write_seek(self): - base = TestWriter() - filter = sio.BufferingOutputStream(base, 4) - filter.write("x"*6) - filter.seek(3) - filter.write("y"*2) - filter.close() - assert base.buf == "x"*3 + "y"*2 + "x"*1 - -class TestBufferingInputOutputStreamTests: - - def test_write(self): - base = TestReaderWriter() - filter = sio.BufferingInputOutputStream(base, 4) - filter.write("123456789") - assert base.buf == "12345678" - s = filter.read() - assert base.buf == "123456789" - filter.write("01234") - assert base.buf == "1234567890123" - filter.seek(4,0) - assert base.buf == "12345678901234" - assert filter.read(3) == 
"567" - filter.write('x') - filter.flush() - assert base.buf == "1234567x901234" - - def test_write_seek_beyond_end(self): - "Linux behaviour. May be different on other platforms." - base = TestReaderWriter() - filter = sio.BufferingInputOutputStream(base, 4) - filter.seek(3) - filter.write("y"*2) - filter.close() - assert base.buf == "\0"*3 + "y"*2 - -class TestCRLFFilter: - - def test_filter(self): - packets = ["abc\ndef\rghi\r\nxyz\r", "123\r", "\n456"] - expected = ["abc\ndef\nghi\nxyz\n", "123\n", "456"] - crlf = sio.CRLFFilter(TestSource(packets)) - blocks = [] - while 1: - block = crlf.read(100) - if not block: - break - blocks.append(block) - assert blocks == expected - -class TestMMapFile(TestBufferingInputStreamTests): - tfn = None - - def teardown_method(self, method): - tfn = self.tfn - if tfn: - self.tfn = None - try: - os.remove(tfn) - except os.error, msg: - print "can't remove %s: %s" % (tfn, msg) - - def makeStream(self, tell=None, seek=None, bufsize=None, mode="r"): - self.tfn = tempfile.mktemp() - f = open(self.tfn, "wb") - f.writelines(self.packets) - f.close() - return sio.MMapFile(self.tfn, mode) - - def test_write(self): - if os.name == "posix": - return # write() does't work on Unix :-( - file = self.makeStream(mode="w") - file.write("BooHoo\n") - file.write("Barf\n") - file.writelines(["a\n", "b\n", "c\n"]) - assert file.tell() == len("BooHoo\nBarf\na\nb\nc\n") - file.seek(0) - assert file.read() == "BooHoo\nBarf\na\nb\nc\n" - file.seek(0) - assert file.readlines() == ( - ["BooHoo\n", "Barf\n", "a\n", "b\n", "c\n"]) - assert file.tell() == len("BooHoo\nBarf\na\nb\nc\n") - -class TestTextInputFilter: - - packets = [ - "foo\r", - "bar\r", - "\nfoo\r\n", - "abc\ndef\rghi\r\nxyz", - "\nuvw\npqr\r", - "\n", - "abc\n", - ] - expected = [ - ("foo\n", 4), - ("bar\n", 9), - ("foo\n", 14), - ("abc\ndef\nghi\nxyz", 30), - ("\nuvw\npqr\n", 40), - ("abc\n", 44), - ("", 44), - ("", 44), - ] - - expected_with_tell = [ - ("foo\n", 4), - ("b", 5), - 
("ar\n", 9), - ("foo\n", 14), - ("abc\ndef\nghi\nxyz", 30), - ("\nuvw\npqr\n", 40), - ("abc\n", 44), - ("", 44), - ("", 44), - ] - - expected_newlines = [ - (["abcd"], [None]), - (["abcd\n"], ["\n"]), - (["abcd\r\n"],["\r\n"]), - (["abcd\r"],[None]), # wrong, but requires precognition to fix - (["abcd\r", "\nefgh"], [None, "\r\n"]), - (["abcd", "\nefg\r", "hij", "k\r\n"], [None, "\n", ("\r", "\n"), - ("\r", "\n", "\r\n")]), - (["abcd", "\refg\r", "\nhij", "k\n"], [None, "\r", ("\r", "\r\n"), - ("\r", "\n", "\r\n")]) - ] - - def test_read(self): - base = TestReader(self.packets) - filter = sio.TextInputFilter(base) - for data, pos in self.expected: - assert filter.read(100) == data - - def test_read_tell(self): - base = TestReader(self.packets) - filter = sio.TextInputFilter(base) - for data, pos in self.expected_with_tell: - assert filter.read(100) == data - assert filter.tell() == pos - assert filter.tell() == pos # Repeat the tell() ! - - def test_seek(self): - base = TestReader(self.packets) - filter = sio.TextInputFilter(base) - sofar = "" - pairs = [] - while True: - pairs.append((sofar, filter.tell())) - c = filter.read(1) - if not c: - break - assert len(c) == 1 - sofar += c - all = sofar - for i in range(len(pairs)): - sofar, pos = pairs[i] - filter.seek(pos) - assert filter.tell() == pos - assert filter.tell() == pos - bufs = [sofar] - while True: - data = filter.read(100) - if not data: - assert filter.read(100) == "" - break - bufs.append(data) - assert "".join(bufs) == all - - def test_newlines_attribute(self): - - for packets, expected in self.expected_newlines: - base = TestReader(packets) - filter = sio.TextInputFilter(base) - for e in expected: - filter.read(100) - assert filter.newlines == e - -class TestTextOutputFilter: - - def test_write_nl(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\n") - filter.write("abc") - filter.write("def\npqr\nuvw") - filter.write("\n123\n") - assert base.buf == "abcdef\npqr\nuvw\n123\n" 
- - def test_write_cr(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\r") - filter.write("abc") - filter.write("def\npqr\nuvw") - filter.write("\n123\n") - assert base.buf == "abcdef\rpqr\ruvw\r123\r" - - def test_write_crnl(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\r\n") - filter.write("abc") - filter.write("def\npqr\nuvw") - filter.write("\n123\n") - assert base.buf == "abcdef\r\npqr\r\nuvw\r\n123\r\n" - - def test_write_tell_nl(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\n") - filter.write("xxx") - assert filter.tell() == 3 - filter.write("\nabc\n") - assert filter.tell() == 8 - - def test_write_tell_cr(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\r") - filter.write("xxx") - assert filter.tell() == 3 - filter.write("\nabc\n") - assert filter.tell() == 8 - - def test_write_tell_crnl(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\r\n") - filter.write("xxx") - assert filter.tell() == 3 - filter.write("\nabc\n") - assert filter.tell() == 10 - - def test_write_seek(self): - base = TestWriter() - filter = sio.TextOutputFilter(base, linesep="\n") - filter.write("x"*100) - filter.seek(50) - filter.write("y"*10) - assert base.buf == "x"*50 + "y"*10 + "x"*40 - -class TestDecodingInputFilter: - - def test_read(self): - chars = u"abc\xff\u1234\u4321\x80xyz" - data = chars.encode("utf8") - base = TestReader([data]) - filter = sio.DecodingInputFilter(base) - bufs = [] - for n in range(1, 11): - while 1: - c = filter.read(n) - assert type(c) == unicode - if not c: - break - bufs.append(c) - assert u"".join(bufs) == chars - -class TestEncodingOutputFilterTests: - - def test_write(self): - chars = u"abc\xff\u1234\u4321\x80xyz" - data = chars.encode("utf8") - for n in range(1, 11): - base = TestWriter() - filter = sio.EncodingOutputFilter(base) - pos = 0 - while 1: - c = chars[pos:pos+n] - if not c: - break - pos += len(c) 
- filter.write(c) - assert base.buf == data - -# Speed test - -FN = "BIG" - -def timeit(fn=FN, opener=sio.MMapFile): - f = opener(fn, "r") - lines = bytes = 0 - t0 = time.clock() - for line in f: - lines += 1 - bytes += len(line) - t1 = time.clock() - print "%d lines (%d bytes) in %.3f seconds for %s" % ( - lines, bytes, t1-t0, opener.__name__) - -def speed_main(): - def diskopen(fn, mode): - base = sio.DiskFile(fn, mode) - return sio.BufferingInputStream(base) - timeit(opener=diskopen) - timeit(opener=sio.MMapFile) - timeit(opener=open) - -# Functional test - -def functional_main(): - f = sio.DiskFile("sio.py") - f = sio.DecodingInputFilter(f) - f = sio.TextInputFilter(f) - f = sio.BufferingInputStream(f) - for i in range(10): - print repr(f.readline()) - diff --git a/pypy/appspace/test/test_struct.py b/pypy/appspace/test/test_struct.py deleted file mode 100644 index 02a6916682..0000000000 --- a/pypy/appspace/test/test_struct.py +++ /dev/null @@ -1,439 +0,0 @@ -import autopath -from pypy.appspace.test.support_tests import TestFailed, verbose, verify -from pypy.appspace import struct -import sys - -ISBIGENDIAN = sys.byteorder == "big" -del sys -verify((struct.pack('=i', 1)[0] == chr(0)) == ISBIGENDIAN, - "bigendian determination appears wrong") - -def string_reverse(s): - chars = list(s) - chars.reverse() - return "".join(chars) - -def bigendian_to_native(value): - if ISBIGENDIAN: - return value - else: - return string_reverse(value) - -def simple_err(func, *args): - try: - func(*args) - except struct.error: - pass - else: - raise TestFailed, "%s%s did not raise struct.error" % ( - func.__name__, args) - -def any_err(func, *args): - try: - func(*args) - except (struct.error, OverflowError, TypeError): - pass - else: - raise TestFailed, "%s%s did not raise error" % ( - func.__name__, args) - - -simple_err(struct.calcsize, 'Z') - -sz = struct.calcsize('i') -if sz * 3 != struct.calcsize('iii'): - raise TestFailed, 'inconsistent sizes' - -fmt = 'cbxxxxxxhhhhiillffd' 
-fmt3 = '3c3b18x12h6i6l6f3d' -sz = struct.calcsize(fmt) -sz3 = struct.calcsize(fmt3) -if sz * 3 != sz3: - raise TestFailed, 'inconsistent sizes (3*%r -> 3*%d = %d, %r -> %d)' % ( - fmt, sz, 3*sz, fmt3, sz3) - -simple_err(struct.pack, 'iii', 3) -simple_err(struct.pack, 'i', 3, 3, 3) -simple_err(struct.pack, 'i', 'foo') -simple_err(struct.pack, 'P', 'foo') -simple_err(struct.unpack, 'd', 'flap') -s = struct.pack('ii', 1, 2) -simple_err(struct.unpack, 'iii', s) -simple_err(struct.unpack, 'i', s) - -c = 'a' -b = 0 -h = 255 -i = 65535 -l = 65536 -f = 3.1415 -d = 3.1415 - -for prefix in ('', '@', '<', '>', '=', '!'): - for format in ('xcbhilfd', 'xcBHILfd'): - format = prefix + format - if verbose: - print "trying:", format - s = struct.pack(format, c, b, h, i, l, f, d) - cp, bp, hp, ip, lp, fp, dp = struct.unpack(format, s) - if (cp != c or bp != b or hp != h or ip != i or lp != l or - int(100 * fp) != int(100 * f) or int(100 * dp) != int(100 * d)): - # ^^^ calculate only to two decimal places - raise TestFailed, "unpack/pack not transitive (%s, %s)" % ( - str(format), str((cp, bp, hp, ip, lp, fp, dp))) - -# Test some of the new features in detail - -# (format, argument, big-endian result, little-endian result, asymmetric) -tests = [ - ('c', 'a', 'a', 'a', 0), - ('xc', 'a', '\0a', '\0a', 0), - ('cx', 'a', 'a\0', 'a\0', 0), - ('s', 'a', 'a', 'a', 0), - ('0s', 'helloworld', '', '', 1), - ('1s', 'helloworld', 'h', 'h', 1), - ('9s', 'helloworld', 'helloworl', 'helloworl', 1), - ('10s', 'helloworld', 'helloworld', 'helloworld', 0), - ('11s', 'helloworld', 'helloworld\0', 'helloworld\0', 1), - ('20s', 'helloworld', 'helloworld'+10*'\0', 'helloworld'+10*'\0', 1), - ('b', 7, '\7', '\7', 0), - ('b', -7, '\371', '\371', 0), - ('B', 7, '\7', '\7', 0), - ('B', 249, '\371', '\371', 0), - ('h', 700, '\002\274', '\274\002', 0), - ('h', -700, '\375D', 'D\375', 0), - ('H', 700, '\002\274', '\274\002', 0), - ('H', 0x10000-700, '\375D', 'D\375', 0), - ('i', 70000000, '\004,\035\200', 
'\200\035,\004', 0), - ('i', -70000000, '\373\323\342\200', '\200\342\323\373', 0), - ('I', 70000000L, '\004,\035\200', '\200\035,\004', 0), - ('I', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0), - ('l', 70000000, '\004,\035\200', '\200\035,\004', 0), - ('l', -70000000, '\373\323\342\200', '\200\342\323\373', 0), - ('L', 70000000L, '\004,\035\200', '\200\035,\004', 0), - ('L', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0), - ('f', 2.0, '@\000\000\000', '\000\000\000@', 0), - ('d', 2.0, '@\000\000\000\000\000\000\000','\000\000\000\000\000\000\000@', 0), - ('f', -2.0, '\300\000\000\000', '\000\000\000\300', 0), - ('d', -2.0, '\300\000\000\000\000\000\000\000','\000\000\000\000\000\000\000\300', 0), -] - -for fmt, arg, big, lil, asy in tests: - if verbose: - print "%r %r %r %r" % (fmt, arg, big, lil) - for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil), - ('='+fmt, ISBIGENDIAN and big or lil)]: - res = struct.pack(xfmt, arg) - if res != exp: - raise TestFailed, "pack(%r, %r) -> %r # expected %r" % ( - fmt, arg, res, exp) - n = struct.calcsize(xfmt) - if n != len(res): - raise TestFailed, "calcsize(%r) -> %d # expected %d" % ( - xfmt, n, len(res)) - rev = struct.unpack(xfmt, res)[0] - if rev != arg and not asy: - raise TestFailed, "unpack(%r, %r) -> (%r,) # expected (%r,)" % ( - fmt, res, rev, arg) - -########################################################################### -# Simple native q/Q tests. - -has_native_qQ = 1 -try: - struct.pack("q", 5) -except struct.error: - has_native_qQ = 0 - -if verbose: - print "Platform has native q/Q?", has_native_qQ and "Yes." or "No." 
- -any_err(struct.pack, "Q", -1) # can't pack -1 as unsigned regardless -simple_err(struct.pack, "q", "a") # can't pack string as 'q' regardless -simple_err(struct.pack, "Q", "a") # ditto, but 'Q' - -def test_native_qQ(): - bytes = struct.calcsize('q') - # The expected values here are in big-endian format, primarily because - # I'm on a little-endian machine and so this is the clearest way (for - # me) to force the code to get exercised. - for format, input, expected in ( - ('q', -1, '\xff' * bytes), - ('q', 0, '\x00' * bytes), - ('Q', 0, '\x00' * bytes), - ('q', 1L, '\x00' * (bytes-1) + '\x01'), - ('Q', (1L << (8*bytes))-1, '\xff' * bytes), - ('q', (1L << (8*bytes-1))-1, '\x7f' + '\xff' * (bytes - 1))): - got = struct.pack(format, input) - native_expected = bigendian_to_native(expected) - verify(got == native_expected, - "%r-pack of %r gave %r, not %r" % - (format, input, got, native_expected)) - retrieved = struct.unpack(format, got)[0] - verify(retrieved == input, - "%r-unpack of %r gave %r, not %r" % - (format, got, retrieved, input)) - -if has_native_qQ: - test_native_qQ() - -########################################################################### -# Standard integer tests (bBhHiIlLqQ). - -import binascii - -class IntTester: - - # XXX Most std integer modes fail to test for out-of-range. - # The "i" and "l" codes appear to range-check OK on 32-bit boxes, but - # fail to check correctly on some 64-bit ones (Tru64 Unix + Compaq C - # reported by Mark Favas). 
- BUGGY_RANGE_CHECK = "bBhHiIlL" - - def __init__(self, formatpair, bytesize): - assert len(formatpair) == 2 - self.formatpair = formatpair - for direction in "<>!=": - for code in formatpair: - format = direction + code - verify(struct.calcsize(format) == bytesize) - self.bytesize = bytesize - self.bitsize = bytesize * 8 - self.signed_code, self.unsigned_code = formatpair - self.unsigned_min = 0 - self.unsigned_max = 2L**self.bitsize - 1 - self.signed_min = -(2L**(self.bitsize-1)) - self.signed_max = 2L**(self.bitsize-1) - 1 - - def test_one(self, x, pack=struct.pack, - unpack=struct.unpack, - unhexlify=binascii.unhexlify): - if verbose: - print "trying std", self.formatpair, "on", x, "==", hex(x) - - # Try signed. - code = self.signed_code - if self.signed_min <= x <= self.signed_max: - # Try big -endian. - expected = long(x) - if x < 0: - expected += 1L << self.bitsize - assert expected > 0 - expected = hex(expected)[2:-1] # chop "0x" and trailing 'L' - if len(expected) & 1: - expected = "0" + expected - expected = unhexlify(expected) - expected = "\x00" * (self.bytesize - len(expected)) + expected - - # Pack work? - format = ">" + code - got = pack(format, x) - verify(got == expected, - "'%s'-pack of %r gave %r, not %r" % - (format, x, got, expected)) - - # Unpack work? - retrieved = unpack(format, got)[0] - verify(x == retrieved, - "'%s'-unpack of %r gave %r, not %r" % - (format, got, retrieved, x)) - - # Adding any byte should cause a "too big" error. - any_err(unpack, format, '\x01' + got) - - # Try little-endian. - format = "<" + code - expected = string_reverse(expected) - - # Pack work? - got = pack(format, x) - verify(got == expected, - "'%s'-pack of %r gave %r, not %r" % - (format, x, got, expected)) - - # Unpack work? - retrieved = unpack(format, got)[0] - verify(x == retrieved, - "'%s'-unpack of %r gave %r, not %r" % - (format, got, retrieved, x)) - - # Adding any byte should cause a "too big" error. 
- any_err(unpack, format, '\x01' + got) - - else: - # x is out of range -- verify pack realizes that. - if code in self.BUGGY_RANGE_CHECK: - if verbose: - print "Skipping buggy range check for code", code - else: - any_err(pack, ">" + code, x) - any_err(pack, "<" + code, x) - - # Much the same for unsigned. - code = self.unsigned_code - if self.unsigned_min <= x <= self.unsigned_max: - # Try big-endian. - format = ">" + code - expected = long(x) - expected = hex(expected)[2:-1] # chop "0x" and trailing 'L' - if len(expected) & 1: - expected = "0" + expected - expected = unhexlify(expected) - expected = "\x00" * (self.bytesize - len(expected)) + expected - - # Pack work? - got = pack(format, x) - verify(got == expected, - "'%s'-pack of %r gave %r, not %r" % - (format, x, got, expected)) - - # Unpack work? - retrieved = unpack(format, got)[0] - verify(x == retrieved, - "'%s'-unpack of %r gave %r, not %r" % - (format, got, retrieved, x)) - - # Adding any byte should cause a "too big" error. - any_err(unpack, format, '\x01' + got) - - # Try little-endian. - format = "<" + code - expected = string_reverse(expected) - - # Pack work? - got = pack(format, x) - verify(got == expected, - "'%s'-pack of %r gave %r, not %r" % - (format, x, got, expected)) - - # Unpack work? - retrieved = unpack(format, got)[0] - verify(x == retrieved, - "'%s'-unpack of %r gave %r, not %r" % - (format, got, retrieved, x)) - - # Adding any byte should cause a "too big" error. - any_err(unpack, format, '\x01' + got) - - else: - # x is out of range -- verify pack realizes that. - if code in self.BUGGY_RANGE_CHECK: - if verbose: - print "Skipping buggy range check for code", code - else: - any_err(pack, ">" + code, x) - any_err(pack, "<" + code, x) - - def run(self): - from random import randrange - - # Create all interesting powers of 2. - values = [] - for exp in range(self.bitsize + 3): - values.append(1L << exp) - - # Add some random values. 
- for i in range(self.bitsize): - val = 0L - for j in range(self.bytesize): - val = (val << 8) | randrange(256) - values.append(val) - - # Try all those, and their negations, and +-1 from them. Note - # that this tests all power-of-2 boundaries in range, and a few out - # of range, plus +-(2**n +- 1). - for base in values: - for val in -base, base: - for incr in -1, 0, 1: - x = val + incr - try: - x = int(x) - except OverflowError: - pass - self.test_one(x) - - # Some error cases. - for direction in "<>": - for code in self.formatpair: - for badobject in "a string", 3+42j, randrange: - any_err(struct.pack, direction + code, badobject) - -for args in [("bB", 1), - ("hH", 2), - ("iI", 4), - ("lL", 4), - ("qQ", 8)]: - t = IntTester(*args) - t.run() - - -########################################################################### -# The p ("Pascal string") code. - -def test_p_code(): - for code, input, expected, expectedback in [ - ('p','abc', '\x00', ''), - ('1p', 'abc', '\x00', ''), - ('2p', 'abc', '\x01a', 'a'), - ('3p', 'abc', '\x02ab', 'ab'), - ('4p', 'abc', '\x03abc', 'abc'), - ('5p', 'abc', '\x03abc\x00', 'abc'), - ('6p', 'abc', '\x03abc\x00\x00', 'abc'), - ('1000p', 'x'*1000, '\xff' + 'x'*999, 'x'*255)]: - got = struct.pack(code, input) - if got != expected: - raise TestFailed("pack(%r, %r) == %r but expected %r" % - (code, input, got, expected)) - (got,) = struct.unpack(code, got) - if got != expectedback: - raise TestFailed("unpack(%r, %r) == %r but expected %r" % - (code, input, got, expectedback)) - -test_p_code() - - -########################################################################### -# SF bug 705836. "<f" and ">f" had a severe rounding bug, where a carry -# from the low-order discarded bits could propagate into the exponent -# field, causing the result to be wrong by a factor of 2. - -def test_705836(): - import math - - for base in range(1, 33): - # smaller <- largest representable float less than base. 
- delta = 0.5 - while base - delta / 2.0 != base: - delta /= 2.0 - smaller = base - delta - # Packing this rounds away a solid string of trailing 1 bits. - packed = struct.pack("<f", smaller) - unpacked = struct.unpack("<f", packed)[0] - # This failed at base = 2, 4, and 32, with unpacked = 1, 2, and - # 16, respectively. - # print base,delta,"pypy :%s"%packed,unpacked,"CPython :",Cunpacked - verify(base == unpacked," %s == %s"%(base,unpacked)) - bigpacked = struct.pack(">f", smaller) - verify(bigpacked == string_reverse(packed), - ">f pack should be byte-reversal of <f pack") - unpacked = struct.unpack(">f", bigpacked)[0] - verify(base == unpacked) - - # Largest finite IEEE single. - big = (1 << 24) - 1 - big = math.ldexp(big, 127 - 23) - packed = struct.pack(">f", big) - unpacked = struct.unpack(">f", packed)[0] - verify(big == unpacked) - - # The same, but tack on a 1 bit so it rounds up to infinity. - big = (1 << 25) - 1 - big = math.ldexp(big, 127 - 24) - try: - packed = struct.pack(">f", big) - except OverflowError: - pass - else: - TestFailed("expected OverflowError") - -test_705836() diff --git a/pypy/appspace/types.py b/pypy/appspace/types.py deleted file mode 100644 index 6866762ec8..0000000000 --- a/pypy/appspace/types.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Appspace types module. - -!! This file has been copied practicaly verbatim from the CPython source. -!! See http://www.python.org/2.3.2/license.html for licensing info. - -Define names for all type symbols known in the standard interpreter. - -Types that are part of optional modules (e.g. array) are not listed. -""" -from __future__ import generators - -import sys - -# Iterators in Python aren't a matter of type but of protocol. A large -# and changing number of builtin types implement *some* flavor of -# iterator. Don't check the type! Use hasattr to check for both -# "__iter__" and "next" attributes instead. 
- -NoneType = type(None) -TypeType = type -ObjectType = object - -IntType = int -try: - LongType = long -except NameError: - pass -FloatType = float -try: - BooleanType = bool -except NameError: - pass -try: - ComplexType = complex -except NameError: - pass - -StringType = str -try: - UnicodeType = unicode - StringTypes = (StringType, UnicodeType) -except NameError: - StringTypes = (StringType,) - -try: - BufferType = buffer -except NameError: - pass - -TupleType = tuple -ListType = list -DictType = DictionaryType = dict - -def _f(): pass -FunctionType = type(_f) -LambdaType = type(lambda: None) # Same as FunctionType -try: - CodeType = type(_f.func_code) -except RuntimeError: - # Execution in restricted environment - pass - -def g(): - yield 1 -try: - GeneratorType = type(g()) -except: - # Refusing generators - pass -del g - -class _C: - def _m(self): pass -ClassType = type(_C) -try: - UnboundMethodType = type(_C._m) # Same as MethodType -except AttributeError: - pass -_x = _C() -InstanceType = type(_x) -MethodType = type(_x._m) - -BuiltinFunctionType = type(len) -BuiltinMethodType = type([].append) # Same as BuiltinFunctionType - -ModuleType = type(sys) -try: - FileType = file -except NameError: - pass -try: - XRangeType = type(xrange(0)) -except NameError: - pass - -try: - raise TypeError -except TypeError: - try: - tb = sys.exc_info()[2] - TracebackType = type(tb) - FrameType = type(tb.tb_frame) - except AttributeError: - # In the restricted environment, exc_info returns (None, None, - # None) Then, tb.tb_frame gives an attribute error - pass - tb = None; del tb - -SliceType = type(slice(0)) -EllipsisType = type(Ellipsis) - -#DictProxyType = type(TypeType.__dict__) -try: - NotImplementedType = type(NotImplemented) -except NameError: - pass - -del sys, _f, _C, _x#, generators # Not for export diff --git a/pypy/conftest.py b/pypy/conftest.py deleted file mode 100644 index f54580977e..0000000000 --- a/pypy/conftest.py +++ /dev/null @@ -1,143 +0,0 @@ -import py 
-from pypy.interpreter.gateway import app2interp_temp -from pypy.interpreter.error import OperationError -from pypy.objspace.std import StdObjSpace -from pypy.tool import pytestsupport - -# -# PyPy's command line extra options (these are added -# to py.test's standard options) -# -Option = py.test.Option -options = ('pypy options', [ - Option('-o', '--objspace', action="store", default=None, - type="string", dest="objspacename", - help="object space to run tests on."), -]) - - -def getobjspace(name=None, _spacecache={}): - """ helper for instantiating and caching space's for testing. - """ - if name is None: - name = py.test.config.option.objspacename - if name is None: - name = py.std.os.environ.get('OBJSPACE', 'std') - else: - optionname = py.test.config.option.objspacename - if optionname is not None and optionname != name: - return None - try: - return _spacecache[name] - except KeyError: - module = __import__("pypy.objspace.%s" % name, None, None, ["Space"]) - space = module.Space() - _spacecache[name] = space - if name != 'flow': # not sensible for flow objspace case - space.setitem(space.w_builtins, space.wrap('AssertionError'), - pytestsupport.build_pytest_assertion(space)) - space.setitem(space.w_builtins, space.wrap('raises'), - space.wrap(pytestsupport.app_raises)) - space.setitem(space.w_builtins, space.wrap('skip'), - space.wrap(pytestsupport.app_skip)) - space.raises_w = pytestsupport.raises_w.__get__(space) - space.eq_w = pytestsupport.eq_w.__get__(space) - return space - -# -# Interfacing/Integrating with py.test's collection process -# - -class Module(py.test.collect.Module): - """ we take care of collecting classes both at app level - and at interp-level (because we need to stick a space - at the class) ourselves. 
- """ - def collect_function(self, extpy): - if extpy.check(func=1, basestarts='test_'): - if extpy.check(genfunc=1): - yield IntTestGenerator(extpy) - else: - yield IntTestFunction(extpy) - - def collect_app_function(self, extpy): - if extpy.check(func=1, basestarts='app_test_'): - assert not extpy.check(genfunc=1), ( - "generator app level functions? you must be joking") - yield AppTestFunction(extpy) - - def collect_class(self, extpy): - if extpy.check(class_=1, basestarts="Test"): - yield IntClassCollector(extpy) - - def collect_appclass(self, extpy): - if extpy.check(class_=1, basestarts="AppTest"): - yield AppClassCollector(extpy) - -def gettestobjspace(name=None): - space = getobjspace(name) - if space is None: - py.test.skip('test requires object space %r' % (name,)) - return space - - -class PyPyItem(py.test.Item): - # All PyPy test items catch and display OperationErrors specially. - - def setup_module(self, mod): - # if hasattr(mod, 'objspacename'): - # mod.space = getttestobjspace(mod.objspacename) - # stick py.test raise in module globals - mod.raises = py.test.raises - super(PyPyItem, self).setup_module(mod) - - def setup_class(self, cls): - name = getattr(cls, 'objspacename', None) - if name is None: - m = __import__(cls.__module__, {}, {}, ["objspacename"]) - name = getattr(m, 'objspacename', None) - cls.space = gettestobjspace(name) - super(PyPyItem, self).setup_class(cls) - - def execute_appex(self, space, target, *args): - try: - target(*args) - except OperationError, e: - raise self.Failed(excinfo=pytestsupport.AppExceptionInfo(space, e)) - -class IntTestFunction(PyPyItem): - def execute(self, target, *args): - co = target.func_code - if 'space' in co.co_varnames[:co.co_argcount]: - name = target.func_globals.get('objspacename', None) - space = gettestobjspace(name) - self.execute_appex(space, target, space, *args) - else: - target(*args) - -class AppTestFunction(PyPyItem): - def execute(self, target, *args): - assert not args - name = 
target.func_globals.get('objspacename', None) - space = gettestobjspace(name) - func = app2interp_temp(target, target.__name__) - self.execute_appex(space, func, space) - -class IntTestMethod(PyPyItem): - def execute(self, target, *args): - space = target.im_self.space - self.execute_appex(space, target, *args) - -class AppTestMethod(PyPyItem): - def execute(self, target, *args): - assert not args - space = target.im_self.space - func = app2interp_temp(target.im_func, target.__name__) - self.execute_appex(space, func, space, space.w_None) - -class AppClassCollector(py.test.collect.Class): - Item = AppTestMethod - -class IntClassCollector(py.test.collect.Class): - Item = IntTestMethod - diff --git a/pypy/documentation/abstractobjspace.txt b/pypy/documentation/abstractobjspace.txt deleted file mode 100644 index a8d6a3dae8..0000000000 --- a/pypy/documentation/abstractobjspace.txt +++ /dev/null @@ -1,16 +0,0 @@ -======================= -AbstractObjectSpace -======================= - -We currently don't really have an AbstractObjSpace but an AnnotateObjSpace_ (or AnnSpace for short) -which comes close to it. Abstract Interpretation means to the bytecode instructions of a program like an interpreter does but with abstract objects instead of concrete ones. Remember that in PyPy this is done by using alternate object spaces with the same interpreter main loop. - -The most abstract object space is the one manipulating the most abstract objects that you could imagine: they are all equivalent, because we have abstracted away any information about the object. There is actually only one of them left, and we can call it "the object". In Python terms, our AbstractObjectSpace uses None for all its wrapped objects. Any operation between wrapped objects gives None again as the wrapped result -- there is nothing else it could give anyway. So when you have said that the add method of AbstractObjectSpace takes None and None and returns None you have said everything. 
- -The point of such an object space is for example to check the bytecode. The interpreter will really run your bytecode, just with completely abstract arguments. If there is no problem then you are sure that the bytecode is valid. You could also record, during this abstract interpretation, how much the stack ever grows; that would give you a fool-proof method of computing the co_stacksize argument of a code object which might be useful for the PyPy compiler. (There are subtleties which I won't describe here, but that's the basic idea.) - -Typically, however, abstract object spaces are a (little) bit less abstract, still maintaining a minimal amount of information about the objects. For example, a wrapped object could be represented by its type. You then define the object space's add to return int when the two arguments are int and int. That way, you abstractedly call a function with the input argument's types and what the interpreter will do is a type inference. (Here also there are subtle problems, even besides the remark that integer operations can overflow and actually return longs in a real Python implementation.) - -As an example of more abstract object spaces you have the ones with finite domain, i.e. with a finite number of different possible wrapped objects. For example, you can use True and False as wrapped values to denote the fact that the object is, respectively, a non-negative integer or anything else. In this way you are doing another kind of type inference that just tells you which variables will only ever contain non-negative integers. - -.. 
_AnnotateObjSpace: annotateobjspace.html diff --git a/pypy/documentation/annotateobjspace.txt b/pypy/documentation/annotateobjspace.txt deleted file mode 100644 index 683af8d345..0000000000 --- a/pypy/documentation/annotateobjspace.txt +++ /dev/null @@ -1,26 +0,0 @@ -========================= -Annotate Object Space -========================= - -was TranslateObjectSpace -------------------------- -This has been renamed and is currently being re-written. Older information follows: - -This is an example of an ObjectSpace that differs a lot from StandardObjectSpace_. - -At some point in the near future we will have to design a translator from all the RestrictedPython_ code we have into C code. This is the sine qua non condition for our work to be actually usable. Quite unexpectedly, the major piece of the translator is itself an object space, the TranslateObjectSpace. Its goal is to run any RestrictedPython code and produce C code in the background as it does so. - -More specifically, we take our PyPy interpreter with the TranslateObjectSpace instead of the StandardObjectSpace, and run that, asking it to interpret some RestrictedPython bytecode. The idea is that a wrapped object is now the name of a variable in the C program we are emitting: - -The TranslateObjectSpace's add method takes two such variable names x and y, and emits the C code z=x+y;, where z is a new variable name which is returned as the result of add. So when the interpreter goes along the bytecode, the TranslateObjectSpace will create variable names and emit C code that does just the same thing as the bytecode. - -The above description is what would occur if RestrictedPython only had integer data types. We must actually make the wrapped objects a bit more elaborate to also record, besides the C variable name, its basic type. 
This can be easy, like prefixing the variable name with a type character; for example, add would behave as above if the variable names begin with ``i_`` but differently if they begin with the prefix denoting floats or lists, say. - -Once you grasp the idea that this process can actually easily translate any RestrictedPython to C, and if you remember that the interpreter main loop itself is written in RestrictedPython, then you shouldn't be surprised if I say that all we have to do next is run the interpreter to interpret itself (and thus translate itself to C)! - -This is again a rough sketch, but it shows why I expect translating RestrictedPython to C to be easy. The RestrictedPython is precisely restricted in such a way that makes the above process work, e.g. we only ever use well-known types and operations that are easy to convert to an efficient C equivalent. (For example, if lists are translated to C arrays, I'm sure Christian would insist on not using '+' between lists in RestrictedPython but rather '+=', which has a more direct translation as realloc() -- although by now he might be convinced that supporting '+' as a bonus isn't going to be any difficult anyway :-) ) - ------------------------------- - -.. _StandardObjectSpace: stdobjspace.html -.. _RestrictedPython: restrictedpy.html diff --git a/pypy/documentation/annotation.txt b/pypy/documentation/annotation.txt deleted file mode 100644 index b61643024d..0000000000 --- a/pypy/documentation/annotation.txt +++ /dev/null @@ -1,366 +0,0 @@ -The annotation pass -=================== - -We describe below how a control flow graph can be "annotated" -to discover the types of the objects. This annotation pass is -done after control flow graphs are built by the FlowObjSpace, -but before these graphs are translated into low-level code -(e.g. C/Lisp/Pyrex). 
- - -An example: the factorial -------------------------- - -Say we want to make the control flow graph and type inference -on the following program:: - - def f(n): - if n >= 2: - return n * f(n-1) - else: - return 1 - -The flow objspace gives us the following graph (after the -simplification that turns ``call`` into ``simple_call``):: - - StartBlock(v1): - v2 = ge(v1, 2) - exitswitch(v2) - link "True" to Block2(v1) - link "False" to Block3 - - Block2(v3): - v4 = sub(v3, 1) - v7 = simple_call(f, v4) - v8 = mul(v3, v7) - jump to ReturnBlock(v8) - - Block3: - v9 = 1 - jump to ReturnBlock(v9) - - ReturnBlock(retval): - (empty, just returns retval) - -Suppose that we know that ``f`` takes an ``int`` argument. -We start type inference on the first block:: - - Analyse(StartBlock): - v1 ----> SV1 type(SV1)=int - v2 ----> SV2 type(SV2)=bool - -The notation is as follows. Everything at the right of the arrows -lives in a "big heap" of objects and annotations; a object is like a -CPython ``PyObject`` object structure in the heap, althought it can -here be unknown (SV1, SV2, SV3...). Annotations give some information -about the unknown heap objects. The arrows represent binding from -variables to objects. ``SV`` means ``SomeValue``. - -After StartBlock, we proceed to the type inference of its exits; -first Block2:: - - Analyse(Block2): - v3 ------------> SV1 # copied from StartBlock - v4 ------------> SV4 type(SV4)=int - v7 ------------> impossiblevalue - -It fails at the simple_call to f, because we don't know yet anything -about the return value of f. We suspend the analysis of Block2 and -resume at some other non-blocked point -- in this case, the other exit -from the StackBlock, which is jumping to Block3:: - - Analyse(Block3): - v9 --------> SV31 # const(SV31)=1, type(SV31)=int - -The object SV31 is the constant object 1. -Then we proceed to ReturnBlock:: - - Analyse(ReturnBlock): - retval --------> SV31 - -And we are done. 
We can now try to resume the suspended analysis of -Block2 -- in practice it is easier to just restart it:: - - Analyse(Block2): - v3 ------------> SV1 # copied from StartBlock - v4 ------------> SV4 type(SV4)=int - v7 ------------> SV31 # because that's the retval of f - v8 ------------> SV8 type(SV8)=int - -And now is the second branch into ReturnBlock. We must combine the -annotations coming from the two branches:: - - Intersection(ReturnBlock): - previous annotations for retval ----> SV31 type(SV31)=int const(SV31)=1 - new annotations for retval ---------> SV8 type(SV8)=int - intersection of both is retval -----> SV10 type(SV10)=int - -We invalidate the analysis of the blocks that depend on this new result, -namely ReturnBlock, which in turn invalidates the analysis of Block2 -which depended on the return value. Then we can restart it once more:: - - Analyse(Block2): - v3 ------------> SV1 # with type(SV1)=int - v4 ------------> SV4 type(SV4)=int - v7 ------------> SV10 # with type(SV10)=int - v8 ------------> SV11 type(SV11)=int - -Again, we must redo the intersection of the two branches -that enter ReturnBlock:: - - Intersection(ReturnBlock): - previous annotations for retval -------> SV10 type(SV10)=int - new annotations for retval ------------> SV11 type(SV11)=int - intersection doesn't change any more. - -Now the annotations are stable, and we are done. In the final version -summarized below, all the objects that are used in the variables and -in retval are properly annotated:: - - Bindings: - v1 ------> SV1 - v2 ------> SV2 - v3 ------> SV1 - v4 ------> SV4 - v7 ------> SV10 - v8 ------> SV11 - v9 ------> SV31 - retval --> SV10 - - Annotations: - type(SV1)=int - type(SV2)=bool - type(SV4)=int - type(SV10)=int - type(SV11)=int - -The bindings are implemented as a dictionary, and the annotations as -an AnnotationSet instance. More about it below. 
- - -Whole-program analysis ----------------------- - -A program of more than one function is analysed in exactly the same way, -starting from an entry point and following calls. We have a cache of all -the flowgraphs that the flow objspace produced, and a list of pending blocks -that we have to type-analyse. When the analysis of a block temporarily -fails (as above for the first recursive call to ``f``) we move the block -back into the pending list. There is only one heap of annotations for the -whole program, so that we can track where the objects come from and go -through the whole program. (This makes separate compilation very difficult, -I guess.) - - -Empty lists and mutable objects -------------------------------- - -Nothing special is required for empty lists. Let's try:: - - def g(a, n): - a.append(n) - - def f(): - b = [] - g(b, 5) - g(b, 6) - -As above, after simplification:: - - F_StartBlock(): - v1 = newlist() - v2 = simple_call(g, v1, 5) - v3 = simple_call(g, v1, 6) - - Analyse(F_StartBlock): - v1 -------> SV1 type(SV1)=list len(SV1)=0 listitems(SV1)=impossiblevalue - v2 -------> impossiblevalue - -The annotations about ``SV1`` mean that it is an empty list. When trying -to get any item out of it, the result is ``impossiblevalue``, because if we -try to execute code like ``c=b[0]`` then obviously it is impossible for -``c`` to contain any value. It is important not to confuse the two extreme -values: ``impossiblevalue`` means that no value can ever be found there -during actual execution, and corresponds to a ``SVx`` about which all -annotations are still possible. During the annotation pass, annotations -about a ``SVx`` can only *decrease*: we can later remove annotations that -we find to be incorrect, but we don't add new annotations. Thus an -``impossiblevalue`` is a value with potentially all annotations at first. 
-The other extreme is a ``SVx`` with no annotation at all; it represents -an object about which we know nothing at all -- and about which nothing -will be known later either: it means that we have inferred that many -different objects could be found at that point during execution. -Typically it shows a problem, e.g. that type inference failed to figure -out what object type can appear at that point. - -Let's come back to the example. The type analysis above fails at ``v2`` -because of the calls to ``g``, but it triggers the analysis of ``g`` with -the input arguments' annotations:: - - G_StartBlock(v4, v5): - v6 = getattr(v4, 'append') - v7 = simple_call(v6, v5) - - Analyse(G_StartBlock): - v4 -------> SV1 # from the call above - v5 -------> SV35 const(SV35)=5 type(SV35)=int - v6 -------> SV6 im_self(SV6)=SV1 im_func(SV6)=list_append - v7 -------> SV30 const(SV30)=None - -And most importantly the call to list_append corrects the annotations about -``SV1``. The annotation ``len(SV1)=0`` is deleted, and ``listitems(SV1)`` -is generalized from ``impossiblevalue`` to ``SV35`` -- this is done by -the same intersection process as above: we already know that -``listitems(SV1)`` can be ``impossiblevalue``, and now we figure out that -it could also be ``SV35``, so we take the intersection of the annotations -that apply to both ``impossiblevalue`` and ``SV35``. The result in this -case is just ``SV35``. - -Note that killing annotations like ``len(SV1)=0`` invalidates the inference -in any block that explicitely depends on it. Such blocks are marked as -"to be redone". (There isn't any such block in the present example.) - -Only after this can the analysis of ``F_StartBlock`` proceed, and -now we know that v1 points to the list ``SV1`` with the correct annotations: -unknown length, all items are ``5``. 
- -In the above example I also show a second call to ``g(b, 6)``, which -triggers an intersection on the input argument types of ``g`` which was -previously thought to be used with ``5`` only:: - - Intersection(G_StartBlock): - previous annotations for v5 -----> SV35 const(SV35)=5 type(SV35)=int - new annotations for v5 ----------> SV36 const(SV36)=6 type(SV36)=int - intersection of both is v5 ------> SV5 type(SV5)=int - -And so this time the list ``SV1`` is updated with:: - - listitems(SV1)=SV5 - -and now we know that we have a list of integers. - -Note that during this whole process the same list is represented by ``SV1``. -This is important, so that any code anywhere that could modify the list -can kill invalid annotations about it. Intersection must be clever about -mutable objects: we have seen above an example where ``retval`` could map -to ``SV10`` or ``SV11``, and the intersection said it was fine because they -had the same annotations. It would not be fine if ``SV10`` and ``SV11`` -could be of a mutable type. In this case we must force ``SV10==SV11`` for -the whole program. In other words the representation choosen for a list -depends on all the places where this list could go, and these places -themselves use a representation that depends on all the lists that could -come at this point. All these places and lists will use a common, -most general representation. - - -Polymorphism and mixed flowing/inference ----------------------------------------- - -(This paragraph is just an idea, it is not implemented.) - -We might eventually mix type inference and control flow generation a bit -more than described above. The annotations could influence the generation -of the graph. - -The most interesting influence would be to occasionally prevent two -FrameStates from being merged. This would result in a bigger control flow -graph in which several basic blocks can contain the operations about the -same bytecode positions, with different annotations. 
In particular, a -quite interesting idea is to disallow two states to be merged if the -resulting intersection of annotations is too poor -- say if it would make -genpyrex.py use the fall-back generic object type, which is not available -to other genxxx.py. - -The result is that we will automatically generate several specialized -version of the RPython code when it is meant to be polymorphic. For -example, in a function such as:: - - def push(stack, item): - stack.append(item) - -the different entry points, specifying quite different type annotations -for ``item``, are all unmergeable, as merging them would result in -insufficently many annotations left. By contrast, in the factorial -example above, all merges are fine because they conserve at least the -``type(X)=int`` annotation. - - -SomeValue ---------- - -An abstract Python object in the heap is represented by an -instance of ``pypy.annotation.model.SomeValue``. All these SomeValue() -instances print as SV0, SV1, SV2... for debugging, but we only -use their identity internally. - -A SomeValue() alone represents an object about which nothing -is known. To collect information about such an object we use an -instance of ``pypy.annotation.annset.AnnotationSet``. An -annotation is like an attribute of a SomeValue(); for example, to -say that an object SV5 is known to be an integer, then we set the -annotation ``SV5.type = int``. However, for various good and bad -reasons, the annotation is not actually stored as an attribute, -but managed by the AnnotationSet(). The allowed "attributes", -i.e. the various annotations that exist, are in -``pypy.annotation.model.ANN``. Thus for the above example we -set the annotation ``ANN.type`` of SV5 to ``int``. This is what -we wrote in the above examples ``type(SV5) = int``. - -Note that unlike previous attempts an annotation is now always -a "name" (ANN.type) with just two arguments: the subject (SV5) and -the associated value (int). 
Just like with attributes, there are -never more than one associated value per subject and attribute name. -But unlike attributes, all annotations have a default value: -``mostgeneralvalue``, which is a SomeValue() about which nothing -is known. The only differences between the ``mostgeneralvalue`` -and a normal SomeValue() with no annotations are that AnnotationSet -will complain if you try to set annotations to ``mostgeneralvalue``; -and for convenience reasons ``mostgeneralvalue`` is false in a -boolean context. - - -AnnotationSet and ANN ---------------------- - -AnnotationSet has two main methods: ``get(name, subject)`` to -read the current annotation ``name`` about ``subject``, and -``set(name, subject, value)`` to set it. - -The meaning of ``value`` depends on the annotation. In some -cases it is a usual Python object (int, 3, True...). In other -cases it is specifically a SomeValue() instance, on which we -can recursively have partial information only. Here are -a few common annotations: - -* ``ANN.type``: the ``value`` is the type (int, list, ...). - -* ``ANN.len``: the ``value`` is the length of the object - (if known and constant, of course). - -* ``ANN.const``: we know that the ``subject`` is a constant - object; the ``value`` of ``ANN.const`` is precisely this - constant. - -* ``ANN.listitems``: the ``value`` is another SomeValue() - which stands for any item of the list. Thus the - annotations about this sub-SomeValue() tell what is known - in general about all the items in the list. - -* ``ANN.tupleitem[index]``: this is a family of annotations. - This is one of the reasons why we don't just use attributes - to store annotations: the whole expression ``ANN.tupleitem[0]`` - would be the attribute name. The expression ``ANN.tupleitem[1]`` - would be a different attribute name, and so on. Annotation-wise, - ``ANN.tupleitem[i]`` has a ``value`` which is a SomeValue() - describing what is known about the item ``i`` of a tuple. 
- -* ``ANN.immutable``: the ``value`` is always ``True``, unless - the annotation is not set, in which case it automatically - defaults to ``mostgeneralvalue`` (which is considered as false - in a boolean context for convenient checking). When - ``ANN.immutable`` is set, it means that the subject is known to - be of an immutable type (int, float, tuple...). This influences - the intersection algorithm. - -The interection algorithm is implemented in the ``merge()`` method -of AnnotationSet. diff --git a/pypy/documentation/architecture.txt b/pypy/documentation/architecture.txt deleted file mode 100644 index 1c2f78fefd..0000000000 --- a/pypy/documentation/architecture.txt +++ /dev/null @@ -1,312 +0,0 @@ -Overview on PyPy's current architecture (Dec. 2003) -=================================================== - - -Introduction and higher level picture -------------------------------------- - -The various parts of PyPy have always been under more or less heavy -refactoring during our five one-week sprints in 2003. However, the -higher level architecture remains rather simple and unchanged. There -are two independent basic subsystems: - -- the *standard interpreter* which implements the Python language - and is composed out of two components: - - - the *plain interpreter* which is responsible for interpreting - code objects and implementing bytecodes, - - - the *standard object space* which implements creation, access and - modification of application level objects, - - Note that the *standard interpreter* can run fine on top of CPython - (the C Implementation of Python led by Guido van Rossum) but of course - the double-interpretation penalty lets us interpret python programs - rather slowly. - -- the *translation process* which aims at producing a different (low-level) - representation of our standard interpreter. The *translation process* - is done in three steps: - - - producing a *flow graph* representation of the standard interpreter. 
- A combination of a *plain interpreter* and a *flow object space* - performs "abstract interpretation" to record the flow of objects - and execution throughout a python program into such a *flow graph*. - - - the *annotator* which performs type inference on the flow graph - - - the *translator* which translates the (annotated) flow graph into - another language, currently Pyrex/C and LISP. - -Please note that we are using the term *interpreter* most often in -reference to the *plain interpreter* which just knows enough to read, -dispatch and implement *bytecodes* thus shuffling objects around on the -stack and between namespaces. The (plain) interpreter is completly -ignorant of how to access, modify or construct objects and their -structure and thus delegates such operations to a so called "Object Space". - -XXX mention Parser and compiler (we have one available since the Berlin -sprint but it is not integrated) - -The Interpreter -=============== - -The interpreter handles python code objects. The interpreter can build -code objects from Python sources, when needed, by invoking Python's -builtin compiler (we also have a way of constructing those code objects -from python code only, but we have not integrated it yet). Code objects -are a nicely preprocessed, structured representation of source code, and -their main content is *bytecode*. In addition, code objects also know -how to create a *frame* object which has the responsibility to -*interpret* a code object's bytecode. Each bytecode is implemented by a -python function, which, in turn, delegates operations on -application-level objects to an object space. - -The Object Space -================ - -The object space creates all objects and knows how to perform operations -on the objects. You may think of an object space as being a library -offering a fixed API, a set of *operations*, with implementations that -correspond to the known semantics of Python objects. 
An example of an -operation is *add*: add's implementations are, for example, responsible -for performing numeric addition when add works on numbers, concatenation -when add works on built-in sequences. - -All object-space operations take and return "application level" objects. -There is only one, very simple, object-space operation which allows the -interpreter to gain some knowledge about the value of an -application-level object: ``is_true()``, which returns a boolean -interpreter-level value. This is necessary to implement, for example, -if-statements (or rather, to be pedantic, to implement the -conditional-branching bytecodes into which if-statements get compiled). - -We currently have four working object spaces which can be plugged into -the interpreter: - -- The Trivial Object Space, which basically delegates almost all - operations to the underlying CPython interpreter. It was, and still - is, used to test our interpreter. Alhough it is not essential, it - remains useful for testing, and thus it is here to stay. - -- The Standard Object Space, which is an almost complete implementation - of the various Python objects. This is the main focus of this - document, since the Standard Object Space, together with the - interpreter, is the foundation of our Python implementation. - -- the Flow Object Space, which transforms a python program into a - flow-graph representation. The Flow Object Space performs this - transformation task through "abstract interpretation", which we will - explain later in this document. - -- the Trace Object Space, which wraps either the trivial or the standard - object space in order to trace the execution of bytecodes, frames and - object space operations. - -The Standard Object Space -========================= - -The Standard Object Space implements python objects and types, and all -operations on them. It is thus an essential component in order to reach -CPython compatibility. 
- -The implementations of ints, floats, strings, dicts, lists, etc, all -live in separate files, and are bound together by a "multimethod" -mechanism. Multimethods allow a caller - most notably the interpreter - -to stay free from knowing anything about objects' implementations. Thus -multimethods implement a way of delegating to the right implementation -based on the passed in objects (objects previously created by the same -subsystem). We examine how the multimethod mechanism works through an -example. - -We consider the add-operation of ``int`` and ``float`` objects, and -disregard all other object types for the moment. There is one -multimethod ``add``, and both relevant implementations, ``add(intimpl, -intimpl)`` and ``add(floatimpl, floatimpl)``, *register* with that one -``add`` multimethod. - -When we have the expression ``2+3`` in our application program, the -interpreter creates an application-level object containing ("wrapping") -the value ``2`` and another one containing the value ``3``. We talk -about them as ``W_Int(2)`` and ``W_Int(3)`` respectively. The -interpreter then calls the Standard Object Space with ``add(W_Int(2), -W_Int(3))``. - -The Object Space then examines the objects passed in, and delegates -directly to the ``add(intimpl, intimpl)`` function: since this is a -"direct hit", the multimethod immediately dispatches the operation to -the correct implementation, i.e., the one registered as the -implementation for this signature. - -If the multimethod doesn't have any registered functions for the exact -given signature, as would be the case for example for the expression -``2+3.0``, the multimethod tests if it can use coercion to find a -function with a signature that works. In this case we would coerce -``W_Int(2)`` to ``W_Float(2.0)`` in order to find a function in the -multimethod that has a correct signature. 
Note that the multimethod -mechanism is still considered a major refactoring target, since it is -not easy to get it completly right, fast and accurate. - -Application-level and interpreter-level execution and objects -============================================================= - -Since Python is used for implementing all of our code base, there is a -crucial distinction to be aware of: *interpreter-level* objects versus -*application level* objects. The latter are the ones that you deal with -when you write normal python programs. Interpreter-level code, however, -cannot invoke operations nor access attributes from application-level -objects. You will immediately recognize any interpreter level code in -PyPy, because all variable and object names start with a ``w_``, which -indicates that they are "wrapped" application-level values. - -Let's show the difference with a simple example. To sum the contents of -two variables ``a`` and ``b``, typical application-level code is ``a+b`` --- in sharp contrast, typical interpreter-level code is ``space.add(w_a, -w_b)``, where ``space`` is an instance of an object space, and ``w_a`` -and ``w_b`` are typical names for the *wrapped* versions of the two -variables. - -It helps to remember how CPython deals with the same issue: interpreter -level code, in CPython, is written in C, and thus typical code for the -addition is ``PyNumber_Add(p_a, p_b)`` where ``p_a`` and ``p_b`` are C -variables of type ``PyObject*``. This is very similar to how we write -our interpreter-level code in Python. - -Moreover, in PyPy we have to make a sharp distinction between -interpreter- and application-level *exceptions*: application exceptions -are always contained inside an instance of ``OperationError``. This -makes it easy to distinguish failures in our interpreter-level code from -failures appearing in a python application level program that we are -interpreting. 
- - -Application level is often preferable -------------------------------------- - -Application-level code is substantially higher-level, and therefore -correspondingly easier to write and debug. For example, suppose we want -to implement the ``update`` method of dict objects. Programming at -application level, we can write an obvious, simple implementation, one -that looks like an **executable definition** of ``update``, for -example:: - - def update(self, other): - for k in other.keys(): - self[k] = other[k] - -If we had to code only at interpreter level, we would have to code -something much lower-level and involved, say something like:: - - def update(space, w_self, w_other): - w_keys = space.call_method(w_other, 'keys') - w_iter = space.iter(w_keys) - while True: - try: w_key = space.next(w_iter) - except NoValue: break - w_value = space.getitem(w_other, w_key) - space.setitem(w_self, w_key, w_value) - -This interpreter-level implementation looks much more similar to the C -source code, although it is probably still more readable. In any case, -it should be obvious that the application-level implementation is -definitely more readable, more elegant and more maintainable than the -interpreter-level one. - -In fact, in almost all parts of PyPy, you find application level code in -the middle of interpreter-level code. Apart from some bootstrapping -problems (application level functions need a certain initialization -level of the object space before they can be executed), application -level code is usually preferable. We have an abstraction (called -'Gateway') which allows the caller of a function to remain ignorant of -whether a particular function is implemented at application or -interpreter level. 
- -Wrapping -======== - -The ``w_`` prefixes so lavishly used in the previous example indicate, -by PyPy coding convention, that we are dealing with *wrapped* objects, -that is, interpreter-level objects which the object space constructs to -implement corresponding application-level objects. Each object space -supplies ``wrap`` and ``unwrap`` operations that move between the two -levels for objects of simple built-in types; each object space also -implements other Python types with suitable interpreter-level classes -with some amount of internal structure. - -For example, an application-level Python ``list`` is implemented as an -instance of ``W_ListObject``, which has an instance attribute -``ob_item`` (an interpreter-level list which contains the -application-level list's items as wrapped objects) and another attribute -``ob_size`` which records the application-level list's length (we want -to be able to do "over-allocation" in ``ob_item``, for the same reasons -of performance that lead CPython to do it, and therefore the length of -``ob_item`` is allowed to be greater than the length of the -application-level list -- it is for this reason that the length in -question has to be explicitly recorded in ``ob_size``). - -See ``wrapping.txt`` for more details. - - -RPython, the Flow Object Space and translation -============================================== - -One of PyPy's -term objectives is to enable translation of our -interpreter and standard object space into a lower-level language. In -order for our translation and type inference mechanisms to work -effectively, we need to restrict the dynamism of our interpreter-level -Python code at some point. However, in the start-up phase, we are -completly free to use all kind of nice python constructs, including -metaclasses and execution of dynamically constructed strings. 
However, -when the initialization phase (mainly, the function -``objspace.initialize()``) finishes, all code objects involved need to -adhere to a (non-formally defined) more static subset of Python: -Restricted Python, also known as 'RPython'. - -The Flow Object Space then, with the help of our plain interpreter, -works through those initialized "RPython" code objects. The result of -this *abstract interpretation* is a flow graph: yet another -representation of a python program, but one which is suitable for -applying translation and type inference techniques. The nodes of the -graph are basic blocks consisting of Object Space operations, flowing -of values, and an exitswitch to one, two or multiple links which connect -each basic block to other basic blocks. - -The flow graphs are fed as input into the Annotator. The Annotator, -given entry point types, infers the types of values that flow through -the program variables. Here, one of the informal definitions of RPython -comes into play: RPython code is restricted in such a way that the -translator is able to compile low-level **typed** code. How much -dynamism we allow in RPython depends, and is restricted by, the Flow -Object Space and the Annotator implementation. The more we can improve -this translation phase, the more dynamism we can allow. In some cases, -however, it will probably be more feasible and practical to just get rid -of some of the dynamism we use in our interpreter level code. It is -mainly because of this trade-off situation that we don't currently try -to formally define 'RPython'. - -The actual low-level code (and, in fact, also other high-level code) is -emitted by "visiting" the type-annotated flow graph. Currently, we have -a Pyrex-producing backend, and a Lisp-producing backend. We use (a -slightly hacked version of) Pyrex to generate C libraries. Since Pyrex -also accepts plain non-typed python code, we can test translation even -though type annotation is not complete. 
- -Trace Object Space -================== - -A recent addition is the Trace Object space, which wraps a standard or -trivial object space in order to trace all object space operations, -frame creation, deletion and bytecode execution. The ease with which -the Trace Object Space was implemented at the Amsterdam Sprint -underlines the power of the Object Space abstraction. (Of course, the -previously-implemented Flow Object Space producing the flow graph -already was proof enough). - -There are certainly many more possibly useful Object Space ideas, such -as a ProxySpace that connects to a remote machine where the actual -operations are performed. At the other end, we wouldn't need to change -object spaces at all in order to extend or modify the interpreter, e.g. -by adding or removing some bytecodes. Thus, the interpreter and -object-space cooperation nicely splits the python runtime into two -reasonably-independent halves, cooperating along a reasonably narrow -interface, and suitable for multiple separate implementations. diff --git a/pypy/documentation/basicblock.asc b/pypy/documentation/basicblock.asc deleted file mode 100644 index 87991f754b..0000000000 --- a/pypy/documentation/basicblock.asc +++ /dev/null @@ -1,36 +0,0 @@ -the CtlFlowObjSpace is supposed to produce the following object-model -(currently just a linear list of objects referencing each other, sorry -no UML diagream :-) - -the reason we want this objectmodel as the source for doing the -translation is that otherwise we get very-hard-to-read low-level/pyrex -code with lots of gotos etc.pp. - -With the CtlFlow-object model we can basically reconstruct some -source-structure (without examining the bytecode). - -class BasicBlock: - .inputargs = [list-of-input-locals] - .locals = [list-of-all-locals-incluing-inputargs] - .operations = [list-of-operations] - .branch = <Branch or ConditionalBranch instance> - -class Variable: - pass - -class Constant: - .value = ... 
- -class SpaceOperation: - .opname = 'add' - .args = [list-of-variables] - .result = <Variable/Constant instance> - -class Branch: - .args = [list-of-variables] - .target = <BasicBlock instance> - -class ConditionalBranch: - .condition = <Variable instance> - .iftrue = <Branch instance> - .iffalse = <Branch instance> diff --git a/pypy/documentation/checking_ReST.txt b/pypy/documentation/checking_ReST.txt deleted file mode 100644 index f90d1f45fc..0000000000 --- a/pypy/documentation/checking_ReST.txt +++ /dev/null @@ -1,9 +0,0 @@ -1. You need to have docutils installed. -2. docutils/tools/buildhtml.py converts your ReST .txt files into .html - files, and complains when your document has errors. -3. There is one problem. It converts _every_ .txt to an .html, and there - is no option to just specify one file. I get around this problem by - making a directory and moving my new file there for testing, and then - moving it back when I am done. 0 Elegance points for that fix ... - - Laura
\ No newline at end of file diff --git a/pypy/documentation/cmodules.txt b/pypy/documentation/cmodules.txt deleted file mode 100644 index 816114b347..0000000000 --- a/pypy/documentation/cmodules.txt +++ /dev/null @@ -1,208 +0,0 @@ -PyPy shares a common need with other python reimplementations: -reimplementing c-coded modules in Python. The more plain -python code there is the less code needs to be ported either -Python or to the implementation language (e.g. C# for IronPython_). - -In the course of an email discussion_ Anthony Baxter came up with -a annotated list of modules and some rough categorization of -the to-be-or-has-been ported modules. The list doesn't claim -to be exhaustive and should certainly be worked on. - -Probably doable ---------------- - -These ones are probably achievable, although you might think -otherwise. - -_bisectmodule - already exists - -_codecsmodule - -_csv - already exists? - -_heapqmodule - already exists - -_hotshot - -_localemodule - -_randommodule - already exists but with a lesser generator - -_sre - already exists? - -_weakref - -arraymodule - -audioop - -binascii - -cPickle - already exists - -cStringIO - already exists - -cgensupport - -cmathmodule - -collectionsmodule - Raymond Hettinger has an ASPN recipe (which has a 'deque' drop-in replacement): - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/259179 - -cryptmodule - -datetimemodule - already exists - -gcmodule - -imageop - -imgfile - -itertoolsmodule - the docs include pure python equivalents in a form -suitable for cut and pasting - -mathmodule - -md5module - done - -operator - done in PyPy - -parsermodule - already exists? 
- -rgbimgmodule - -sgimodule - -shamodule - done - -stropmodule - -structmodule - -symtablemodule - -syslogmodule - -timingmodule - -unicodedata - -yuvconvert - -zipimport - -Deprecated ----------- - -These modules are deprecated - -regexmodule - -regexpr - -rotormodule - -mpzmodule - -Links in an external library ----------------------------- - -These are all wrappers around external C libraries. Rewriting them -from scratch is really not practical. - -_bsddb - -_curses_panel - -_cursesmodule - -_ssl - -_tkinter - -almodule - -bsddbmodule - -bz2module - -cdmodule - -clmodule - -dbmmodule - -flmodule - -fmmodule - -gdbmmodule - -glmodule - -nismodule - -puremodule - -pyexpat - -readline (replace with pyrepl?) - -svmodule - -termios - -tkappinit - -zlibmodule - -System specific ---------------- - -These are modules that are wrappers around system calls -and the like. Rewriting them in pure Python will still -need a way to invoke the underlying calls - and will probably -be different for each platform (pypy, pycore, ironpython, &c). -A lot of these are also a mass of #ifdefs for various broken -operating systems. - -dlmodule - -errnomodule - -fcntlmodule - -fpectlmodule - -fpetestmodule - -grpmodule - -linuxaudiodev - -mmapmodule - -ossaudiodev - -posixmodule - -pwdmodule - -resource - -selectmodule - -signalmodule - -socketmodule - -sunaudiodev - -threadmodule - -timemodule - - -.. _IronPython: http://ironpython.com/ -.. 
_discussion: http://codespeak.net/pipermail/pypy-dev/2004q3/001509.html diff --git a/pypy/documentation/coding-style.txt b/pypy/documentation/coding-style.txt deleted file mode 100644 index 34abf2e90e..0000000000 --- a/pypy/documentation/coding-style.txt +++ /dev/null @@ -1,67 +0,0 @@ -===================================== -Coding Style -===================================== - - -Naming and environment ------------------------------ - -- directories/modules/namespaces are always **lowercase** - -- classes are **CamelCase** - -- never use plural names in directory and file names - -- ``__init__.py`` is always empty except for ``"pypy/objspace/*"`` - -- functions/methods are lowercase and ``'_'-separated`` (if - you need to separate at all) - -- not more than 4 directory nesting levels - -- it's appreciated if you manage to name files in a directory - so that tab-completion on the shell level is as easy as possible. - -Committing ----------- - -- it's nice to write good log messages because several people - are reading the diffs. - -- if you add (text/py) files to the repository then please run - pypy/tool/fixeol in that directory. This will make sure - that the property 'svn:eol-style' is set to native which - allows checkin/checkout in native line-ending format. - -Object Spaces ---------------- - -- objectspace classes are always spelled "ObjSpace". e.g. - - - TrivialObjSpace - - StdObjSpace - - FlowObjSpace - -- at interpreter level and in ObjSpace all boxed values - have a leading ``w_`` to indicate "wrapped values". This - includes w_self. Don't use ``w_`` in application level - python only code. - -Test conventions ----------------- - -- adding features usually requires adding appropriate tests. - (It often even makes sense to first write the tests so that you are - sure that they actually can fail.) - -- All over the pypy source code there are test/ directories - which contain unittests. 
Such scripts can usually be executed - directly or are collectively run by pypy/test_all.py - -- each test directory needs a copy of pypy/tool/autopath.py which - upon import will make sure that sys.path contains the directory - where 'pypy' is in. - -- see some more information about tests at the test-design_ document. - -.. _test-design: ../devel/testdesign.html diff --git a/pypy/documentation/controlflow.txt b/pypy/documentation/controlflow.txt deleted file mode 100644 index bdf9455dd2..0000000000 --- a/pypy/documentation/controlflow.txt +++ /dev/null @@ -1,162 +0,0 @@ -FlowObjSpace -============ - -Introduction ------------- - -The FlowObjSpace generates a control-flow graph from a function. This graph also contains a trace of the individual operations, so that it is actually just an alternate representation for the function. - -The FlowObjSpace is an object space, which means that it exports the standard object space interface and it is driven by the interpreter. - -The basic idea is that if the interpreter is given a function, e.g.:: - - def f(n): - return 3*n+2 - -it will do whatever bytecode dispatching and stack-shuffling needed, during which it issues a sequence of calls to the object space. The FlowObjSpace merely records these calls (corresponding to "operations") in a structure called a basic block. To track which value goes where, the FlowObjSpace invents placeholder "wrapped objects" and give them to the interpreter, so that they appear in some next operation. - -For example, if the placeholder ``v1`` is given as the argument to the above function, the interpreter will call ``v2 = space.mul(space.wrap(3), v1)`` and then ``v3 = space.add(v2, space.wrap(2))`` and return ``v3`` as the result. 
During these calls the FlowObjSpace will record a basic block:: - - Block(v1): # input argument - v2 = mul(constant(3), v1) - v3 = add(v2, constant(2)) - - -Joining basic blocks --------------------- - -A basic block ends in one of two cases: when the interpreters calls ``is_true()``, or when a joinpoint is reached. - -* A joinpoint is a specially marked position in the bytecode. This is the only bytecode dependency in FlowObjSpace. Intuitively, there should be one joinpoint at each bytecode position where two different paths can "join" together, e.g. the entry point of a loop. A joinpoint forces a basic block to end and the next one to begin. A snapshot of the current frame is taken (a FrameState) and recorded on the joinpoint. If the control flow later reaches this point again, we put a "backwards" jump to the old basic block that starts at this point. (The control flow is actually just a graph, with basic blocks pointing to each other, so there is not really a notion of "backwards".) - -* If the interpreter calls ``is_true()``, the FlowObjSpace doesn't generally know if the answer should be True or False, so it puts a conditional jump and generates two successor blocks for the current basic block. There is some trickery involved so that the interpreter is fooled into thinking that ``is_true()`` first returns False (and the subsequent operations are recorded in the first successor block), and later the *same* call to ``is_true()`` also returns True (and the subsequent operations go this time to the other successor block). - - -Passing variables between basic blocks --------------------------------------- - -XXX - - -FlowExecutionContext --------------------- - -The FlowExecutionContext is a modified ExecutionContext that drives the interpreter in a fashion that is quite different from the one needed for normal interpretation. XXX - - -Interface ---------- - -We make one instance of FlowExecutionContext per function to analyse. 
The instance has a cache of FrameStates to detect when the control is looping (``self.joinpoints``). XXX - - -Old stuff to remove -------------------- - -:: - - Hello again - - def f(i): - return g(i)+2 - def g(i): - return i+1 - - f(3) - tspace: - pyxcode.putln('def f(%s):' % sig) - res = frame.eval(executioncontext) - pyxcode.putln('return %s' % res) - - - Pyrex: def f(v1): - dis.dis(f) --> - 2 0 LOAD_GLOBAL 0 (g) - w_result = space.getitem(f.w_globals, w_varname) - - 3 LOAD_FAST 0 (i) - 6 CALL_FUNCTION 1 - space.call(w_function, w_arguments, w_kwds) - space.call(w_g, ("v1",), {}) - Pyrex: v2 = g(v1) - 9 LOAD_CONST 1 (2) - space.wrap(2) - 12 BINARY_ADD - space.add("v2", "constant 2") - Pyrex: v3 = v2 + 2 - 13 RETURN_VALUE - 14 LOAD_CONST 0 (None) - 17 RETURN_VALUE - Pyrex: return v3 - - - Result: - def f(v1): - v2 = g(v1) - v3 = v2 + 2 - return v3 - - - def h(i, j): - if i < 0: - i = j - return i+1 - - Pyrex: def h(v1, v2): - --> interpreter - 3 0 LOAD_FAST 0 (i) - 3 LOAD_CONST 1 (0) - 6 COMPARE_OP 0 (<) - "v3" = space.lt("v1", "constant 0") - Pyrex: v3 = v1 < 0 - 9 JUMP_IF_FALSE 10 (to 22) - space.is_true("v3") - Pyrex: if v3: cinline "goto Label1;" - 12 POP_TOP - - 4 13 LOAD_FAST 1 (j) - 16 STORE_FAST 0 (i) - 19 JUMP_FORWARD 1 (to 23) - >> 22 POP_TOP - - Pyrex: cinline "LabelBytecode23:" # just in case for later - 5 >> 23 LOAD_FAST 0 (i) - 26 LOAD_CONST 2 (1) - 29 BINARY_ADD - space.add("v1", "constant 2") - Pyrex: v4 = v1 + 2 - 30 RETURN_VALUE - 31 LOAD_CONST 0 (None) - 34 RETURN_VALUE - Pyrex: return v4 - - pyrex: cinline "Label1:" - 12 POP_TOP - - 4 13 LOAD_FAST 1 (j) - 16 STORE_FAST 0 (i) - (in the interpreter fastlocals now: i="v2" j="v2") - 19 JUMP_FORWARD 1 (to 23) - >> 22 POP_TOP - - (bytecode 23 already seen!) 
- Pyrex: v1 = v2 - Pyrex: cinline "goto LabelBytecode23;" - 5 >> 23 LOAD_FAST 0 (i) - - def h(i, j): - if i < 0: - i = j - return i+1 - - def h(v1, v2): - v3 = v1 < 0 - if v3: cinline "goto label1;" - - cinline "labelBytecode23:" - v4=v1+1 - return v4 - cinline "label1:" - v1=v2 - cinline "goto labelBytecode23;" - diff --git a/pypy/documentation/developers.txt b/pypy/documentation/developers.txt deleted file mode 100644 index e55328a612..0000000000 --- a/pypy/documentation/developers.txt +++ /dev/null @@ -1,20 +0,0 @@ - - -pypy project developers and contributors:: - - Armin Rigo Holger Krekel - Samuele Pedroni Christian Tismer - Michael Hudson Laura Creighton - Jacob Hallen Alex Martelli - Richard Emslie Seo Sanghyeon - Anna Ravencroft Guido Van Rossum - Anders Lehmann Tomek Meka - Jiwon Seo Stephan Diehl - Jens-Uwe Mager Jonathan David Riehl - Guenter Jantzen Stefan Schwarzer - Dinu Gherman Patrick Maupin - Rocco Moretti Andreas Friedge - Theo de Ridder Olivier Dormond - Bob Ippolito Marius Gedminas - -PyPy Webpage: http://codespeak.net/pypy diff --git a/pypy/documentation/goals.txt b/pypy/documentation/goals.txt deleted file mode 100644 index 662decddfc..0000000000 --- a/pypy/documentation/goals.txt +++ /dev/null @@ -1,376 +0,0 @@ -**Infrastructure and tools** - -Supporting the PyPy project by producing and enhancing the tools. - -Support the development process with reusing existing or developing -new debugging opensource tools. - -- provide access over http/https and ssh server - -- build extensions for automatic document extraction - -- implement a search facility over all content - -- maintain and enhance website infrastructure - -- setup a mirror repository which is kept up-to-date and which - can be used readonly in case of failure of the main repository. - -- Design a strict backup policy. - -- help with automated testing - - XXX add what more do we want to do? 
- -- http-server to present runtime/introspection information aiding - debugging and understanding of PyPy internals - -- automated (unit-)testing framework with html/pdf reports - -**Synchronisation with Standard Python** - -- Keeping PyPy in sync with potential changes to Standard Python. - -- Support a subset of the CPython API for compatibility with extension modules. - -- Facilitate porting of extension modules to PyPy. - -- What's new since 2.2? see http://www.python.org/doc/2.3.3/whatsnew -- Thank you amk! - -**PEPS** - -+ PEP 218: A Standard Set Datatype -- set module -- - -+ PEP 263 defining Python Source Code encodings - -+ PEP 273 Importing Modules from Zip Archives -- zipimport module -- - -+ PEP 277: Unicode file name support for Windows NT - -+ PEP 278: Universal Newline Support - -+ PEP 279: enumerate() - -+ PEP 282: The logging Package - -+ PEP 285: A Boolean Type - -+ PEP 293: Codec Error Handling Callbacks - -+ PEP 301: Package Index and Metadata for Distutils - -+ PEP 302: New Import Hooks - -+ PEP 307: Pickle Enhancements - -**Check and see we have implemented these** - -* Extended Slices - -* The yield statement is now always a keyword - -* new built-in function enumerate() - -* Two new constants, True and False - -* The int() type constructor will now return a long integer instead of - raising an OverflowError when a string or floating-point number is too - large to fit into an integer. This can lead to the paradoxical result - that isinstance(int(expression), int) is false. - -* Built-in types now support the extended slicing syntax - -* A new built-in function, sum(iterable, start=0), adds up the numeric - items in the iterable object and returns their sum. sum() only accepts - numbers, meaning that you can't use it to concatenate a bunch of strings. - -does it handle mixed floats and ints? - -* list.insert(pos, value) used to insert value at the front of the list - when pos was negative. 
The behaviour has now been changed to be consistent - with slice indexing, so when pos is -1 the value will be inserted before - the last element, and so forth. - -so to insert at the front? - -* list.index(value), which searches for value within the list and - returns its index, now takes optional start and stop arguments to limit - the search to only part of the list. - -* Dictionaries have a new method, pop(key[, default]), that returns the - value corresponding to key and removes that key/value pair from the - dictionary. If the requested key isn't present in the dictionary, - default is returned if it's specified and KeyError raised if it isn't. - -* There's also a new class method, dict.fromkeys(iterable, value), that - creates a dictionary with keys taken from the supplied iterator iterable - and all values set to value, defaulting to None. - - Also, the dict() constructor now accepts keyword arguments to simplify - creating small dictionaries: - - >>> dict(red=1, blue=2, green=3, black=4) - {'blue': 2, 'black': 4, 'green': 3, 'red': 1} - - -* The assert statement no longer checks the __debug__ flag, so you can no - longer disable assertions by assigning to __debug__. Running Python with - the -O switch will still generate code that doesn't execute any assertions. - -So what if you want to disable assertions just within one module? - -* Most type objects are now callable, so you can use them to create new - objects such as functions, classes, and modules. (This means that the - new module can be deprecated in a future Python version, because you - can now use the type objects available in the types module.) For example, - you can create a new module object with the following code: - - >>> import types - >>> m = types.ModuleType('abc','docstring') - >>> m - <module 'abc' (built-in)> - >>> m.__doc__ - 'docstring' - -* A new warning, PendingDeprecationWarning was added to indicate features - which are in the process of being deprecated. 
The warning will not be - printed by default. To check for use of features that will be deprecated - in the future, supply -Walways::PendingDeprecationWarning:: on the - command line or use warnings.filterwarnings(). - -* The process of deprecating string-based exceptions, as in - raise "Error occurred", has begun. Raising a string will now trigger - PendingDeprecationWarning. - -* Using None as a variable name will now result in a SyntaxWarning warning. - In a future version of Python, None may finally become a keyword. - -* The xreadlines() method of file objects, introduced in Python 2.1, - is no longer necessary because files now behave as their own - iterator. xreadlines() was originally introduced as a faster way to - loop over all the lines in a file, but now you can simply write - for line in file_obj. File objects also have a new read-only encoding - attribute that gives the encoding used by the file; Unicode strings - written to the file will be automatically converted to bytes using - the given encoding. - -* The method resolution order used by new-style classes has changed, - though you'll only notice the difference if you have a really - complicated inheritance hierarchy. Classic classes are unaffected by - this change. Python 2.2 originally used a topological sort of a - class's ancestors, but 2.3 now uses the C3 algorithm as described in - the paper *A Monotonic Superclass Linearization for Dylan*. To - understand the motivation for this change, read Michele Simionato's - article *Python 2.3 Method Resolution Order*, or read the thread - on python-dev starting with the message at - http://mail.python.org/pipermail/python-dev/2002-October/029035.html. - Samuele Pedroni first pointed out the problem and also implemented the fix - by coding the C3 algorithm. - -* Python runs multithreaded programs by switching between threads - after executing N bytecodes. 
The default value for N has been - increased from 10 to 100 bytecodes, speeding up single-threaded - applications by reducing the switching overhead. Some multithreaded - applications may suffer slower response time, but that's easily - fixed by setting the limit back to a lower number using - sys.setcheckinterval(N). The limit can be retrieved with the new - sys.getcheckinterval() function. - -* One minor but far-reaching change is that the names of extension - types defined by the modules included with Python now contain the - module and a "." in front of the type name. For example, in Python - 2.2, if you created a socket and printed its __class__, you'd get - this output: - - >>> s = socket.socket() - >>> s.__class__ - <type 'socket'> - - In 2.3, you get this: - - >>> s.__class__ - <type '_socket.socket'> - -* One of the noted incompatibilities between old- and new-style - classes has been removed: you can now assign to the __name__ and - __bases__ attributes of new-style classes. There are some - restrictions on what can be assigned to __bases__ along the lines of - those relating to assigning to an instance's __class__ attribute. - -**String Changes** - -* The in operator now works differently for strings. Previously, when - evaluating X in Y where X and Y are strings, X could only be a - single character. That's now changed; X can be a string of any - length, and X in Y will return True if X is a substring of Y. If X - is the empty string, the result is always True. - - >>> 'ab' in 'abcd' - True - >>> 'ad' in 'abcd' - False - >>> '' in 'abcd' - True - -* The strip(), lstrip(), and rstrip() string methods now have an - optional argument for specifying the characters to strip. The - default is still to remove all whitespace characters: - -* The startswith() and endswith() string methods now accept negative - numbers for the start and end parameters. - -* Another new string method is zfill(), originally a function in the - string module. 
zfill() pads a numeric string with zeros on the left - until it's the specified width. Note that the % operator is still - more flexible and powerful than zfill(). - -* A new type object, basestring, has been added. Both 8-bit strings - and Unicode strings inherit from this type, so isinstance(obj, basestring) - will return True for either kind of string. It's a completely abstract - type, so you can't create basestring instances. - -* Interned strings are no longer immortal and will now be - garbage-collected in the usual way when the only reference to them - is from the internal dictionary of interned strings. - -**Replace PyPy Core** - -Building a complete Python interpreter written in Python, -using a subset of Python that avoids dynamic featureslll -which would impair the objectives of RPython. - -- Design and implement the PyPy bytecode interpreter and - build an object space library in RPython. - The resulting interpreter must cover the complete Python - language specification. -- mostly done, test coverage. - -- Port the built-in Python library to PyPy (functions, types and - modules currently implemented in C) -- types needs refactoring, - the rest is mostly done. Anthony Baxter has given us a very useful - list of the cmodules_ we need to consider porting. - -- Decide on a case-by-case basis which features are to - be implemented using RPython or just general Python. -- moving target. - -- Implement a Python parser and bytecode compiler in Python. - -- we have something, but it is not well integrated. - -- A partial Python implementation that passses 75% of the official - Python test suite that don't depend on extension modules. - -- write this test and then pass it. - -**The PyPy Translation** - -- Analysis and translation of the PyPy core into efficient low-level code - (C, Pyrex, Java, others). - moving target - -- Provide a Runtime Library for the translated versions - of PyPy. 
- not even started - -- Create a code analysis tool for a subset of the Python - language (RPython). Coordinate the definition of RPython - being the implementation language of the core. - - - we can analyse functions, but not classes, modules, spaces etc. - -- Complete implementation of Python, conforming to the language - definition and passing all relevant tests of the official Python - test suite. -- global goal, which we will think more about when - it is very close to being done. - -**Stackless Integration + More Performance Hacks** - -- Identification and Implementation of Optimisations through modifications - of the Translator. - -- Enable Massive Parallelism in a Single Thread. - -- Provide support for real-time parallelism. - -- Allow Pickling of a Running Program. - -- Enhance the translator to support continuation passing - style by integrating technology from the Stackless project. - -- Implement the necessary runtime system to support massive parallelism. - -- Implement a single-threaded, pre-emptive scheduler with - priorities, complementing the OS threads. - -- Study approaches concerning code size vs. speed trade-offs. - -- Implement and compare different object layout and memory management - strategy or strategies. - -- Enhance multimethod dispatching. - -- Implement schemes of pointer tagging. - -- Create reports and merge the results back into the optimization effort. - -**Psyco Integration** - -- Outperform the state-of-the art (Psyco, Stackless). - -- Enhance PyPy to dynamically adapt to its run-time environment and - to the characteristics of the running program. Dramatically - increase speed by enabling Just-In-Time compilation and - specialization. - -- Address multiple processor architectures. - -- Apply and enhance techniques from the Psyco project. Promote parts - of the static translator to be used for run-time specialization. 
- -- Design and implement a back-end component for dynamically emitting - machine code for multiple processor architectures. Enable dynamic - foreign function calls. - -- Research optimisation heuristics for the Just-In-Time compiler. - -- A Just-In-Time compiler for PyPy !!! Outperform the - state-of-the-art (Psyco, Stackless). - -**Embed in Specialized Hardware** - -if we get funding. - -**Implement Security, Distribution and Persistence** - -- Research and validate the flexibility of PyPy by building key middleware - features into the language itself. - -- Analyze and implement security models at the language level. - -- Implement the "RExec" restricted execution model. (It was removed - from the official Python implementation because it is false security.) - -- Analyze and implement distributed execution models at the language level. - -- Implement network-transparent execution of Python programs. (Typical - libraries require programs to be aware of the remote execution model.) - -- Analyze and implement persistence at the language level. -- talk to - Patrick O'Brien who is really hot for the idea. - -- Implement an orthogonally persistent object space for Python programs. - (Persistence is never fully orthogonal without advanced language support.) - -**PyPy A la Carte** - -- Develop tools that will allow choosing from available features and runtime - restrictions to build a custom PyPy version. - -- Analyse and integrate all results from the other workpackages. - This involves identifying and resolving conflicts which could prevent - a combination of desired optimization goals. - -- Implement user interfaces to select features and runtime restrictions. - Provide a way to automatically build a PyPy custom version for different - memory, performance and extension requirements. - -- Make a build- and configuration tool that allows building custom PyPy - versions suitable for specialized environments such as small or very large - computing devices. 
- -.. _cmodules: http://codespeak.net/pypy/index.cgi?doc/cmodules.html
\ No newline at end of file diff --git a/pypy/documentation/howtopypy.txt b/pypy/documentation/howtopypy.txt deleted file mode 100644 index c92d3aa2fc..0000000000 --- a/pypy/documentation/howtopypy.txt +++ /dev/null @@ -1,371 +0,0 @@ -================================== -Getting started with PyPy -================================== - -.. contents:: -.. sectnum:: - -Just the facts -============== - - -Checking out & running PyPy as a two-liner ------------------------------------------- - -There is no public release yet, but you can easily do:: - - svn co http://codespeak.net/svn/pypy/dist dist-pypy - -and after checkout you can get a PyPy interpreter via:: - - python dist-pypy/pypy/interpreter/py.py - -have fun :-) - -You can also go the more `detailed version`_ of this -two-liner. - -Browsing via HTTP and getting an svn client -------------------------------------------- - -You can `browse the pypy source code`_ directly via http. -(sorry, viewcvs is still not stable enough with subversion). -And here is some information to `install a subversion client`_. - -.. _`install a subversion client`: howtosvn.html -.. _`browse the pypy source code`: http://codespeak.net/svn/pypy/dist - -coding style and testing ------------------------- - -We keep a strong focus on testing because we want to be able -to refactor things all the time (without proper automated -testing this would become very hard and fragile). - -For an overview of how we organize our codebase please look at our -`coding-style document`_. - -For running all PyPy tests you can issue:: - - cd dist-pypy/pypy/ - python test_all.py - -test_all.py really is another name for `py.test`_ which is a testing -tool working from the current directory unless you specify -filename/directory arguments. - -If you want to have write access to the codespeak respository -please send a mail to <b>jum at anubis han de</b> or <b>hpk at merlinux de</b> -in order to ask for a username and password. 
Please mention what you want to do -within the pypy project. Even better, come to our next sprint so that we can get -to know you. - -.. _`documentation start page`: http://codespeak.net/pypy/index.cgi?doc/index.html -.. _`coding-style document`: http://codespeak.net/pypy/index.cgi?doc/coding-style.html -.. _`py.test`: /py/current/doc/test.html - - -PyPy Documentation -================== - -Viewing documentation ---------------------- - -PyPy documentation is generated from reST textfiles in the pypy/documentation directory of -our pypy-subversion repository. Go to the `documentation start page`_ and hit -"recently modified" to get a list of recently modified documents. While in -"doc-view" you also have a navigation area on the left side which maps all -documentation files. - -Adding documentation --------------------- - -Please add new or updated documentation by checking it in to the appropriate -directory in subversion, usually under -http://codespeak.net/svn/pypy/dist/pypy/documentation - -+ Remember to run ``svn up`` **before** doing any commit. -+ All filenames should be lowercase, and documentation should be .txt files. -+ Mark-up the documentation with reST so it can generate a pretty html version. -+ On the server side a commit on the doc-subtree will immediately update the webpage. - -*Note* If you don't markup the textfile, it'll still be checked in, but when docutils -runs the parser, it'll look ugly on the website. So run docutils yourself before you commit it. - -Some reST basics: ------------------ - -There should be a title on your page. Do it like this:: - - Here is my Title - ================== - - Here is a section title - ------------------------- - -Make sure you have a blank line after your = or - lines or it will give you an error. 
-For marking a block of code so it'll look right, you can:: - - Put a line of text ending with :: - indent your code at least one space - my code - more code - even more code - still more code - -End of the "block" occurs whenever you unindent back to the same level as the -text with the ``::`` at the end. - -Using an underscore after a word like this_ will make reST think you want a hyperlink. -To avoid that (especially with things like ``wrap_``), you can use the `` back quote `` -to mark it as plain text. - -You can get more info on reST markup at http://docutils.sourceforge.net/docs/rst/quickref.html - -Checking your work ------------------------- - -In order to make sure that what you commit looks reasonably pretty (or at least not -entirely broken), you'll need to run the ``docutils`` parser on it. Unless you've -installed it in the past, you probably don't have it installed. Open IDLE (or any -Python interactive environment) and try "import docutils". If it imports, hooray! -Otherwise, you'll need to download it. - -Go to sourceforge and download the ``snapshot`` version. Install it. - -*Note to Debian users:* Be sure you installed ``python2.2-dev``, which includes ``distutils``, -before trying to install ``docutils``. - -Once you have ``docutils`` installed, you can use it go to your shell and use it like this:: - - $ python ~/mypath/docutils/tools/buildhtml.py - /// Processing directory: /home/anna/downloads/arObjSpaceDoc - ::: Processing .txt: howtosvn.txt - ::: Processing .txt: index.txt - -**WARNING** This will process **all** text documents in the directory and any subdirectories. -I prefer to work on text in a separate directory, run the ``docutils`` parser to see what it -looks like, then copy the .txt file over to my local /doc checkouts to commit it. - -Use a browser menu to go to ``File: Open: filename.html`` then you can see what it looks -like. Look at the command shell to see what errors you've got on which lines and fix it -in your textfile. 
You can then re-run the buildhtml.py script and see what errors you get. -After it's fixed, you can commit the .txt file and it'll automagically be turned into html -viewable on the website. - - -Here are some sample reST textfiles to see what it looks like: - -+ ObjectSpace_ -+ ObjectSpaceInterface_ - -.. _this: http://docutils.sourceforge.net/docs/rst/quickref.html -.. _ObjectSpace: objspace/objspace.html -.. _ObjectSpaceInterface: objspace/objspaceinterface.html - - -.. _`detailed version`: - -The long'n detailed version -=========================== - -PyPy sources can be browsed on the web at: - - http://codespeak.net/svn/pypy/dist - -Once you are ready to download and try PyPy out, -follow these instructions, which assume that you -are working in a DOS box (Windows) or terminal (MacOS/X or Linux). - -1. Download subversion_ if you do not already have it. - -2. Change to the directory where you wish to install the source tree, - and use subversion to download the source:: - - svn co http://codespeak.net/svn/pypy/dist dist-pypy - - This will create a directory named ``dist-pypy``, and will get - you the PyPy source in ``dist-pypy/pypy`` and documentation - files in ``dist-pypy/pypy/documentation``. - -3. To start interpreting Python with PyPy, use Python 2.3 or greater:: - - cd dist-pypy/pypy/interpreter - python py.py - - After a few seconds, you should be at the PyPy prompt, which is - the same as the Python prompt, but with an extra ">". - -4. Now you are ready to start running Python code. Some real Python - modules will not run yet, and others will run too slowly to be - worth waiting for, but a few are fun to run:: - - >>>> import pystone - >>>> pystone.main(10) - - Note that this is a slightly modified version of pystone -- the - original version does not accept the parameter to main(). 
The - parameter is the number of loops to run through the test, and the - default is 50000, which is far too many to run in a reasonable time - on the current PyPy implementation. - - -5. To list the PyPy interpreter command line options, type:: - - python py.py --help - - As an example of using PyPy from the command line, you could type:: - - python py.py -c "import pystone; pystone.main(10)" - - Alternatively, as with regular Python, you can simply give a - script name on the command line:: - - python py.py ../appspace/pystone.py - - (Note that this will run "forever" -- actually, "just" for many - hours, with the current implementation of PyPy.) - - -6. The PyPy project uses test-driven-development. Right now, there are - a couple of different categories of tests which you can run. - To run all the unit tests:: - - cd dist-pypy/pypy - python test_all.py - - Alternatively, you may run subtests by going to the correct subdirectory - and running them individually:: - - cd dist-pypy/pypy - python test_all.py module/test/test_builtin.py - - ``test_all.py`` is actually just a synonym for `py.test`_ which is - our external testing tool. If you have installed that then you - can as well just issue ``py.test DIRECTORY_OR_FILE`` in order - to perform test runs. - - Finally, there are some more advanced tests (which are derived from - some of the standard CPython tests). These are not part of the unit - tests, because they take longer than the standard unit tests:: - - cd dist-pypy/pypy/interpreter - python py.py ../appspace/builtin_types_test.py - -Trying out the translator -========================= - -The translator is a tool based on the PyPy interpreter which can translate -sufficiently static Python programs into low-level code. - -1. Download and install `Dot Graphviz`_. - -2. Download and install Pygame_ if you do not already have it. - -3. 
Type:: - - cd dist-pypy/pypy/translator - python -i translator.py - - Test snippets of translatable code are provided in the file - ``test/snippet.py``. For example:: - - >>> t = Translator(test.is_perfect_number) - >>> t.simplify() - >>> t.view() - -4. We have a type annotator that can completely infer types for functions like - ``is_perfect_number``:: - - >>> t.annotate([int]) - >>> t.view() - - Move the mouse over variable names (in red) to see their inferred types. - -5. The graph can be turned into Pyrex code, with types if ``annotate()`` was - called:: - - >>> print t.pyrex() - >>> f = t.compile() - >>> f(28) - 1 - - Note how the strange-looking Pyrex code is unrelated to the original - Python source code. This is because the Pyrex code is generated from the - graph only, without reference to the original source. - - -Where to start reading the sources -================================== - -PyPy is made from parts that are relatively independent from each other. -You should start looking at the part that attracts you most: - -* `dist-pypy/pypy/interpreter`_ contains the basic interpreter: bytecode dispatcher - in pyopcode.py_, frame and code objects in eval.py_ and pyframe.py_, - function objects and argument passing in function.py_ and argument.py_, - the object space interface definition in baseobjspace.py_, modules in - module.py_ and extmodule.py_. Core types supporting the interpreter are - defined in typedef.py_. - -* `dist-pypy/pypy/objspace/std`_ contains the `Standard object space`_. The main file - is objspace.py_. For each type, the files ``xxxtype.py`` and - ``xxxobject.py`` contain respectively the definition of the type and its - (default) implementation. - -* `dist-pypy/pypy/objspace`_ contains a few other object spaces: the trivial one - (but let's forget about it), the trace_ one, the flow_ one. The latter - is a relatively short piece of code that builds the control flow graphs - when the interpreter runs in it. 
- -* `dist-pypy/pypy/translator`_ contains the code analysis and generation stuff. - Start reading from translator.py_, from which it should be easy to follow - the pieces of code involved in the various translation phases. - -* `dist-pypy/pypy/annotation`_ contains the data model for the type annotation that - can be inferred about a graph. The graph "walker" that uses this is in - `dist-pypy/pypy/translator/annrpython.py`_. - - -To learn more -============= - -* To learn more about PyPy and its development process, head - read around in the documentation_ and the wiki_, and consider - subscribing to the `mailing lists`_ (or simply - read the archives online) or show up irc.freenode.net:6667, channel #pypy. - -* To help PyPy become Python-the-next-generation, you may write some - `unit tests`_ and file some `bug reports`_ (although we are not really - actively using the issue tracker yet, watch out :-) - --------------------------------------------------------------------------------- - -.. _subversion: http://codespeak.net/pypy/index.cgi?doc/howtosvn.html -.. _Dot Graphviz: http://www.research.att.com/sw/tools/graphviz/ -.. _Pygame: http://www.pygame.org/ -.. _dist-pypy/pypy/interpreter: http://codespeak.net/svn/pypy/dist/pypy/interpreter/ -.. _pyopcode.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/pyopcode.py -.. _eval.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/eval.py -.. _pyframe.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/pyframe.py -.. _function.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/function.py -.. _argument.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/argument.py -.. _baseobjspace.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/baseobjspace.py -.. _module.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/module.py -.. _extmodule.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/extmodule.py -.. _typedef.py: http://codespeak.net/svn/pypy/dist/pypy/interpreter/typedef.py -.. 
_dist-pypy/pypy/objspace/std: http://codespeak.net/svn/pypy/dist/pypy/objspace/std/ -.. _Standard object space: http://codespeak.net/pypy/index.cgi?doc/stdobjspace.html -.. _objspace.py: http://codespeak.net/svn/pypy/dist/pypy/objspace/std/objspace.py -.. _dist-pypy/pypy/objspace: http://codespeak.net/svn/pypy/dist/pypy/objspace/ -.. _trace: http://codespeak.net/svn/pypy/dist/pypy/objspace/trace.py -.. _flow: http://codespeak.net/svn/pypy/dist/pypy/objspace/flow/ -.. _dist-pypy/pypy/translator: http://codespeak.net/svn/pypy/dist/pypy/translator/ -.. _translator.py: http://codespeak.net/svn/pypy/dist/pypy/translator/translator.py -.. _dist-pypy/pypy/annotation: http://codespeak.net/svn/pypy/dist/pypy/annotation/ -.. _dist-pypy/pypy/translator/annrpython.py: http://codespeak.net/svn/pypy/dist/pypy/translator/annrpython.py -.. _mailing lists: http://codespeak.net/pypy/index.cgi?lists -.. _documentation: http://codespeak.net/pypy/index.cgi?doc -.. _wiki: http://codespeak.net/moin/pypy/moin.cgi/FrontPage?action=show -.. _unit tests: http://codespeak.net/pypy/index.cgi?doc/testdesign.html -.. _bug reports: http://codespeak.net/issues/pypy/ diff --git a/pypy/documentation/howtosvn.txt b/pypy/documentation/howtosvn.txt deleted file mode 100644 index c3a711b507..0000000000 --- a/pypy/documentation/howtosvn.txt +++ /dev/null @@ -1,153 +0,0 @@ -================= -subversion -================= - -The PyPy codebase, documentation and web pages are controlled by subversion. -If you already know how to use it here is the URL you need to interact -with subversion: - -``http://codespeak.net/svn/pypy/trunk/`` - -If you don't know what to do then Jens-Uwe Mager has prepared some -installation files which should help you to install subversion on -your computer. - -+ Download Unix source tarball or prepackaged versions_ for MacOS, Windows, FreeBSD and Linux - -+ Additional information for Windows users: - - * See Microsoft website_ if you have .DLL issues. 
- - * Windows Installer file for Tortoise SVN (like Tortoise CVS) GUI_ - (Pick the UNICODE version for Windows 2000 and XP and - see Win_ 2000, NT if you have problems loading it.) - -+ Local copy of MacOS_ X binary tar ball - (This requires at least OS X 10.3) - -+ Debian instructions below... - -btw, HowToInstallServer_ sketches how to install a subversion server on Linux (not as easy as the client install). You don't need to install server side files to get your client going. - -Getting started ------------------ - -If you're just getting started with subversion, here's a simple how-to. -For complete information, you can go read the subversion guide_. - -**Download and install the appropriate installation file of subversion above.** - -For linux: - -download the tarball. unzip and untar it. Then type *./configure*. Then, as root, *make* followed by *make install*. Voilà ... a subversion client. - -For Debian users:: - - $ apt-get install subversion-tools - -People using Debian *stable* first need to add the following line to ``/etc/apt/sources.list`` (thanks backports_!):: - - deb http://fs.cs.fhm.edu/mirror/backports.org/debian stable subversion - -Note that you can always go look at the files online_ with your browser, located at: http://codespeak.net/svn/pypy/trunk -But, you'll want to check out your own local copies to work on. - -Check out and Check in ----------------------------- - -There are currently two directories you'll want to check out: /src and /doc -In order to get the sourcecode and docs downloaded onto your drive, open a shell or commandline and type:: - - $ svn co http://codespeak.net/svn/pypy/dist - $ svn co http://codespeak.net/svn/pypy/extradoc - -If you are behind a dump proxy this may or may not work; see below. - -Once you've got the files checked out to your own system, you can use your favorite text editor to change to files. Be sure to read the coding-style_ and other documentation files before doing a lot of work on the source code. 
Before doing any work, make sure you're using the most recent update with:: - - $ svn up - -this will update whichever subdirectory you're in (doc or src). - -When you're ready to **check in** a file, - -cd to your local checked out sourcecode directory, and if necessary, copy the file over from wherever you worked on it:: - - $ cp ~/mydir/filename.ext filename.ext - -If you're adding a brand-new file:: - - $ svn add filename.ext - -Then, to **commit** it:: - - $ svn ci -m "your comments about what changes your committing" - $ your password: (this may not be necessary) - -You'll see something like the following:: - - Adding goals/stringcomp.py - Transmitting file data . - Committed revision 578. - -or:: - - Sending coding-style.txt - Transmitting file data . - Committed revision 631. - -Check online on the check-in archives_ and you'll see your revision. Feel free to add a documentation file on any major changes you've made! See the readme_ on documentation for more info. - -Some other useful subversion tricks: --------------------------------------- - -**Be sure to remember ``svn`` in the commandline in the following commands.** - -``$ svn mv filename.ext`` - to move or rename a file - -``$ svn rm filename.ext`` - to remove (delete) a file - -``$ svn status`` - will let you know what changes you've made compared to the current repository version - -``$ svn revert filename.ext`` - will fix problems if you deleted or moved a file without telling svn. - -``$ svn cleanup`` - last resort to fix it if you've got a totally messed up local copy. - Use this if you see error messages about ``locked`` files that you can't fix otherwise. - -Circumventing proxies ----------------------------- - -Some proxies don't let extended HTTP commands through. 
If you have an -error complaining about a bad request, you can ask subversion to use the -alternate port 8080 to connect to codespeak.net by adding the following -lines in a file ``~/.subversion/servers`` (on Unix) or -``%APPDATA%\Subversion\servers`` (on Windows):: - - [groups] - codespeak = codespeak.net - - [codespeak] - http-proxy-host = codespeak.net - http-proxy-port = 8080 - --------------------------------------------------------------------------------- - - -.. _website: http://support.microsoft.com/default.aspx?scid=kb%3Ben-us%3B259403 -.. _GUI: http://tortoisesvn.tigris.org/servlets/ProjectDocumentList?folderID=616 -.. _MacOS: http://codespeak.net/~jum/svn-1.1.3-darwin-ppc.tar.gz -.. _versions: http://subversion.tigris.org/project_packages.html -.. _Win: http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=4B6140F9-2D36-4977-8FA1-6F8A0F5DCA8F - -.. _guide: http://svnbook.red-bean.com/book.html#svn-ch-1 -.. _archives: http://codespeak.net/pipermail/pypy-svn/ -.. _online: http://codespeak.net/svn/pypy/trunk/ -.. _coding-style: http://codespeak.net/pypy/doc/coding-style.html -.. _readme: http://codespeak.net/pypy/doc/readme.html -.. _HowToInstallServer: http://codespeak.net/moin/pypy/moin.cgi/HowToInstallServer -.. _backports: http://www.backports.org diff --git a/pypy/documentation/index.txt b/pypy/documentation/index.txt deleted file mode 100644 index 020b27e64f..0000000000 --- a/pypy/documentation/index.txt +++ /dev/null @@ -1,88 +0,0 @@ -=================================================
-PyPy - a Python_ implementation written in Python
-=================================================
-
-recently-modified_
-
-.. _Python: http://www.python.org/dev/doc/maint24/ref/ref.html
-
-Here are some good entry points into PyPy's world:
-
- * architecture_:
- a technical overview of PyPy's current architecture
-
- * howtopypy_:
- provides some hands-on instructions for getting started,
- including a two-liner to run PyPy on your computer.
-
-
-Before doing pypy coding, you might also take a look at these
-developer-specific instructions:
-
- * coding-style_:
- covers pypy coding conventions
-
- * optionaltool_:
- there are some optional tools we use for pypy.
-
- * wrapping_:
- a description of the crucial distinction between application-level and
- interpreter-level objects (without understanding this you might
- have difficulties understanding PyPy's source code).
-
- * oscon2003-paper_:
- presentation to OSCON on what pypy is about and why you should care
-
- * testdesign_:
- pypy is a test-driven development project. Read here to find out
- more about how we're doing testing.
-
-Further reading / related projects
-----------------------------------
-
-* An interesting thread on an HP tech report that may be proof that pypy is feasible_ . (We already knew that...)
-
-* An interesting thread on why VHLL rock_ . (We already knew that too.)
-
-* A thread on Python in Scheme_ .
-
-* An intriguing project, FlashMob_ - creating an ad-hoc supercomputer.
-
-* A discussion on Python and lisp_ support
-
-* An interesting repository_ of papers by Xerox Parc members, with quite a few issues more or less relevant to PyPy.
-
-* A thread on the gnu lightning_ project. "GNU lightning is a library that generates assembly language code at run-time; it is very fast, making it ideal for Just-In-Time compilers, and it abstracts over the target CPU, as it exposes to the clients a standardized RISC instruction set inspired by the MIPS and SPARC chips."
-
-* A project to create a Low Level Virtual Machine (LLVM_) and a PyPy-LLVM_ discussion, and conversation_ between PyPy and LLVM.
-
-* A thread discussing the xhelix_ python C extension implementing Helix encryption and authentication, which may be interesting to use as a pypy performance test at some point.
-
-* A paper for PyCon 2004: "IronPython_ is a new implementation of the Python language targeting the Common Language Runtime (CLR). It compiles python programs into bytecode (IL) that will run on either Microsoft's .NET or the Open Source Mono platform. IronPython includes an interactive interpreter and transparent on-the-fly compilation of source files just like standard Python. In addition, IronPython supports static compilation of Python code to produce static executables (.exe's) that can be run directly or static libraries (.dll's) that can be called from other CLR languages."
-
-* A comparison of Python and Pliant_ , an OS written in a python-like language.
-
-
-.. _architecture: http://codespeak.net/pypy/index.cgi?doc/architecture.html
-.. _oscon2003-paper: http://codespeak.net/pypy/index.cgi?doc/oscon2003-paper.html
-.. _howtopypy: http://codespeak.net/pypy/index.cgi?doc/howtopypy.html
-.. _readme: http://codespeak.net/pypy/index.cgi?doc/readme.html
-.. _wrapping: http://codespeak.net/pypy/index.cgi?doc/wrapping.html
-.. _coding-style: http://codespeak.net/pypy/index.cgi?doc/coding-style.html
-.. _howtosvn: http://codespeak.net/pypy/index.cgi?doc/howtosvn.html
-.. _optionaltool: http://codespeak.net/pypy/index.cgi?doc/optionaltool.html
-.. _testdesign: http://codespeak.net/pypy/index.cgi?doc/testdesign.html
-.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html
-.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html
-.. _Scheme: http://codespeak.net/pipermail/pypy-dev/2004q1/001256.html
-.. _FlashMob: http://www.flashmobcomputing.org/
-.. _lisp: http://codespeak.net/pipermail/pypy-dev/2003q4/001048.html
-.. _repository: http://www2.parc.com/csl/groups/sda/publications.shtml
-.. _lightning: http://codespeak.net/pipermail/pypy-dev/2003q4/001051.html
-.. _LLVM: http://llvm.cs.uiuc.edu/
-.. _PyPy-LLVM: http://codespeak.net/pipermail/pypy-dev/2003q4/001115.html
-.. _conversation: http://codespeak.net/pipermail/pypy-dev/2003q4/001119.html
-.. _xhelix: http://codespeak.net/pipermail/pypy-dev/2003q4/001129.html
-.. _IronPython: http://www.python.org/pycon/dc2004/papers/9/
-.. _pliant: http://pliant.cx
-.. _recently-modified: http://codespeak.net/pypy/index.cgi?doc/recent
diff --git a/pypy/documentation/multimethod.txt b/pypy/documentation/multimethod.txt deleted file mode 100644 index 5a422bc43d..0000000000 --- a/pypy/documentation/multimethod.txt +++ /dev/null @@ -1,254 +0,0 @@ -========================= -MultiMethods and Coercion -========================= - -Introduction ------------- - -A "multimethod" is the generalization of the OOP notion of "method". -Theoretically, a method is a "message name" and signature attached to a -particular base class, which is implementated in the class or its subclasses. -To do a "method call" means to send a message to an object, using a message -name and actual arguments. We call "message dispatch" the operation of -finding which actual implementation is suitable for a particular call. For -methods, a message is dispatched by looking up the class of the "self" object, -and finding an implementation in that class, or in its base classes, in a -certain order. - -A multimethod is a message name and signature that can have implementations -that depend not only on the class of the first "self" argument, but on the -class of several arguments. Because of this we cannot use Python's nice model -of storing method implementations as functions, in the attributes of the -class. - -Here is a common implementation of multimethods: they are instances of a -specific MultiMethod class, and the instances are callable (there is a -__call__ operator on MultiMethod). When a MultiMethod is called, a dispatch -algorithm is used to find which, among the registered implementations, is the -one that should be called; this implementation is then immediately called. The -most important difference with normal methods is that the MultiMethod object -to call is no longer syntactically attached to classes. In other words, -whereas a method is called with ``obj.somemethod(args)``, a multimethod is -called much like a function, e.g. ``dosomething(obj1, obj2, obj3...)``. 
You -have to find the MultiMethod object ``dosomething`` in some namespace; it is -no longer implicitely looked up in the namespace of the "self" object. - -In PyPy the MultiMethod objects are stored in the object space instance, thus -``space.add`` is the name of a MultiMethod. The ``space`` argument is not -used for the dispatch, but just as a convenient place to put the MultiMethod -object. - - -Concept Trees -------------- - -A multimethod is a "signature" and a set of "implementations", which are -regular Python functions. The difficulty is to figure out an algorithm that -should be used to know, given actual argument values, which implementation -should be called. - -This algorithm should only depend on the types of the arguments. For -explicitness we will *not* use the Python class inheritance at all (because it -is better suited to normal methods, and anyway it can be emulated if needed). -Thus the following diagram looks like inheritance diagrams, but it is actually -just an explicitely specified tree:: - - Value - \ - \ - Float - / \ - / \ - Integer \ - / \ F1 - / \ - I1 I2 - -This diagram contains three Python types I1, I2 and F1, and three "concepts" -Integer, Float and Value, which are just names. The types I1 and I2 could be -two different ways to implement integers (e.g. machine-sized ints, and -variable-sized longs), and the type F1 could be the IEEE doubles. Note that in -this model the real Python types are the "leaves" of the tree. - -Consider the multimethod ``add(Value, Value)``. The signature specifies that -the two arguments should be two Values (remember that Value is not a type, -just some "concept"; you cannot ask whether a Python object is a Value or -not). Suppose that this multimethod has got three implementations: -``add(I1,I1)``, ``add(I1,I2)`` and ``add(F1,F1)``. 
If you call ``add(t1,t2)`` with -two objects of type ``I1``, the first implementation is used; if you call it -with a ``I1`` and a ``I2``, the second one is used; and if you call it with two -``F1``, the third one is used. - -But if you call it with another pattern of types, there is no direct match. -To be able to satisfy the call, at least one of the arguments will have to be -converted to another type. This is where the shape of the tree becomes -important. Remember that the signature of the multimethod is ``add(Value, -Value)``. The two ``Value`` here mean that conversions are allowed inside of -the part of the tree that is below ``Value``. (This is the tree shown above; -maybe there are other "concepts" above ``Value`` outside the picture, but they -are ignored.) The intuitive meaning of the signature is: "add() is an -operation between two Values". It allows an object of type I1 to be converted -into an object of type I2 or F1 and vice-versa, as long as the objects have the -same "Value", in an intuitive sense. An example of conversion that destroys -the Value would be casting a I1 object into an instance of ``object``, which is -a parent class of any Python class. - - -Conversion ----------- - -All conversions that don't change the "Value" of an object can be registered -as Python functions. For example:: - - def f(x): - return F1(x) - -might be a conversion from I1 to F1. But we can say more: this conversion is -more precisely defined at the level of "Float" in the tree above. -Similarily, a conversion from I1 to I2 would probably be defined at the -"Integer" level. - -Suppose that we have defined these two conversions, from I1 to F1 and -from I1 to I2. Suppose now that we call ``add(i,f)`` where ``i`` is of -type I1 and ``f`` is of type F1. Clearly, we want ``i`` to be converted -into a F1, which allows ``add(Value, Value)`` to be called and -dispatched to the ``add(F1,F1)`` implementation. 
To find this -conversion, the algorithm looks into the subconcepts of ``Value``, -starting from the leaves and going back to the higher levels, until it -can satisfy the call request by inserting conversions registered at -these levels. - -Starting from the lower levels and going up allows the tree to prioritize the -solutions: it is better to try to convert between Integers, and only if that -fails, to try to convert at the Float level, which might promote integer -objects into floats. - - -Multimethod signature ---------------------- - -The signature ``add(Value, Value)`` of the multimethod is essential to specify -that conversions are indeed allowed for the addition. In other multimethods, -some arguments might play different roles. Consider a multimethod for -"in-place addition": as this operation might mutate its first argument, it -must never be automatically converted. This is expressed by saying that the -signature of this multimethod is ``inplace_add(Identity, Value)`` where -``Identity`` is another concept that intuitively means "a Python object whose -identity is important". ``Identity`` would not appear in a tree, or if it -would, it would not have conversions between its subtypes. - -Note how a real Python object of type I1 can either be an "Integer" or an -"Identity" depending on its role in a multimethod call. This is why we cannot -use normal inheritance as the (global) conversion tree: which tree to use -depends on the role of the argument, which changes in different positions of -different multimethods. - -This approach is general enough to allow arguments to play very different -roles. For example, the same mecanisms could be used for an argument that -must be an integer: the multimethod signature would specify ``Integer`` -instead of ``Value``. It still allows conversion between integer -representations, but not from float to int. 
- -In PyPy, some "concepts" are by convention tied to the application-level -types: ``Integer`` corresponds to the application-level ``class int``, -and ``Float`` to ``class float``. The I1, I2 and F1 types are -interpreter-level implementations, and they are normally not visible at -application-level. It is then natural to define what the method -``int.__add__(self, other)`` should do: it should require an ``Integer`` -as its first argument, but it could be I1 or I2 -- appropriate conversions -can be done, as long as we don't convert ``self`` outside the realm of -``Integer``. - - -Conversion multimethods ------------------------ - -As conversion functions are linked to levels in the tree, and there can be -several conversions for each level, they are much like multimethods -themselves. In other words, for each "concept" (Value, Float, Integer) we can -introduce a multimethod (valueconv, floatconv, integerconv) with the -corresponding signature (``valueconv(Value)``, ``floatconv(Float)``, -``integerconv(Integer)``). Specific conversion functions are implementations -of one of these multimethods. For example, if ``g`` is a I1-to-I2 conversion, -it is an implementation of ``integerconv(Integer)``, with the type ``g(I1)``. - -The job of the multimethod dispatcher algorithm is to insert the appropriate -implementations of the allowed ``xxxconv(Xxx)`` multimethods until the call -can be satisfied. - -A nice point of view is that these conversion multimethod are identity -functions (i.e. functions that do nothing, and return their argument -unmodified): ``integerconv(Integer)`` is the abstract function that takes an -Integer and just returns it; an implementation like ``g(I1)`` actually takes a -I1 and returns a I2, which is different, but when you look at it abstractedly at -the Integer level, you just see an Integer input argument and the same Integer -result. - - -Algorithm ---------- - -Here is a suggested algorithm. 
Roughly, it assumes that arguments with the -same abstract signature (e.g. ``Value`` in ``add(Value, Value)``) work -together, but arguments with distinct signature are independent. - -Assume that the signature of a multimethod is ``m(C1,...,Cn)``, and we want to -dispatch the call ``m(A1,...,An)``, where the arguments have types -``T1,...,Tn`` respectively. Each type ``Ti`` must appear in the subtree below -``Ci``, otherwise it is a TypeError. - -We use a single set S of types and concepts, which will grow until it is large -enough to contain the appropriate conversion functions:: - - S = { } # empty set - sortedmatches = [] - while 1: - find_matches_in(S) - i = the largest integer in {1,...,n} such that Ci not in S - or break if there isn't any such i any more - C = the first item in order(Ci) such that C not in S - add C into S - also add into S the whole subtree of C - -where ``order(C)`` is a kind of "method resolution order" of everything -*under* ``C`` (instead of *over* ``C`` for Python's MRO). For example, -following Python 2.2:: - - def order(C): - lst = [] - for j in range(n,0,-1): - if Tj in subtree(C): - lst += [Tj, parent(Tj), parent(parent(Tj)), ..., C] - for each D that appears more than once in lst: - remove all but the last occurrence of D in lst - return lst - -The algorithm in Python 2.3 is slightly different, and could probably be used -instead, though the difference should not be significant for the kind of trees -we are using. 
- -Finally:: - - def find_matches_in(S): - matches = list_matches_in(S) - remove from matches the items already in sortedmatches - if len(matches) > 1: - warning ambiguity, or maybe use performance hints - sortedmatches += matches - - def list_matches_in(S): - conv = { implementations of the conversion multimethod of C - for C in S } - combine in all possible ways the functions in conv to change - the types T1,...,Tn into the types U1,...,Un of an - implementation of the multimethod to call - -The resulting ``sortedmatches`` list contains, in preference order, the -implementations that are available to be dispatched to. We generally just -call the first one, but it (or any later one) may raise FailedToImplement; in -this case the dispatcher tries the next one. - -The rest of the algorithm are implementation and performance tricks, e.g. it -should try to call a given conversion function only once and remember the -value of the converted argument in case we need it again after a -FailedToImplement. diff --git a/pypy/documentation/newrepolayout.txt b/pypy/documentation/newrepolayout.txt deleted file mode 100644 index 6a49439cbe..0000000000 --- a/pypy/documentation/newrepolayout.txt +++ /dev/null @@ -1,75 +0,0 @@ -=========================================== -Proposition: new svn repository layout -=========================================== - -Motivation ----------- - -The `PyPy repository layout`_ has evolved for two -years into something that needs some refactoring -to become more practical. - -For example, the `extradoc`_ directory was originally intended -to hold developer documentation but nowadays it contains -funding, negotiations, mails and misc-other-stuff documents. It is -not easy and obvious anymore to know which files are relevant. -Moreover, `extradoc`_ is too far away from the source code: -developers currently checkout the 'dist' directory and -don't even get the documentation. This also makes it more -difficult to keep the documentation up-to-date. 
- -.. _`extradoc`: http://codespeak.net/svn/pypy/extradoc -.. _`PyPy repository layout`: http://codespeak.net/svn/pypy/ - -New repo layout proposal Nr. 1 ------------------------------- - -Based on experiences in various repositories here is a -new proposition describing a possible new structure -(starting at /svn/pypy):: - - branch # holds branches - tag # holds tagged dist-versions - - dist # holds current development - pypy # current dist/pypy - documentation # developer documentation (inside pypy!) - py # and other 'externals' - setup.py # should be there at some point - README.txt # tell how to run PyPy, the translator, tests - LICENSE.txt # copyright notices for tree parts including pypy - - extradoc # non-dist documentations (papers etc.pp.) - talk # various pypy-talks - paper # various pypy-related papers (including our own) - sprint # sprint related information (reports etc.pp.) - irclog # IRC logs (snipped appropriately) - - www # website-related stuff (needs its own reconsideration) - - funding # funding related documents - eu-info # official information from the EU - contract # contract material (EU-contract, amendments) - eu-report # eu reports (time/cost/budget) - ... # probably more, we'll see later - -The idea is that developers can use a simple url:: - - svn co https://codespeak.net/svn/pypy/dist dist-pypy - -in order to get everything neccessary for sourcecode, documentation -and test development. Obviously, if you care about the funding -or web site application/contents you can do an appropriate checkout -as well. - -Note, that having documentation inside the source tree will help -with keeping a closer eye on documentation - especially when we -have special ref-integrity tests for the documentation (which itself -should reference real source-code/functions at some point). For -example, the refactoring of unitest.py-style tests to `py.test`_ based ones -"forgot" to modify our test-documentation in the too-far-away doc-folder. 
-We should move to a scheme where such an omission will raise real -test errors. - -.. _`py.test`: http://codespeak.net/py/current/doc/test.html - diff --git a/pypy/documentation/objspace.txt b/pypy/documentation/objspace.txt deleted file mode 100644 index 5cd62cf801..0000000000 --- a/pypy/documentation/objspace.txt +++ /dev/null @@ -1,39 +0,0 @@ -===================== -PyPython ObjectSpace -===================== - -See ObjectSpaceInterface_ for a draft version of the interface specification to ObjectSpace objects. ------------------------------------------------------------------------------------------------------- - -In a Python-like language, a running interpreter has three main parts: - -+ the compiler, which represents the static optimization of the source code into an intermediate format, the bytecode; -+ the object library, implementing the various types of objects and their semantics; -+ the main loop, which shuffles data around and calls the operations defined in the object library according to the bytecode. - -The main loop generally assumes little about the semantics of the objects: they are essentially black boxes (PyObject pointers). The interpreter stack and the variables only contain such black boxes. Every operation is done via calls to the object library, like PyNumber_Add(). - -In PyPy, the three parts are clearly separated and can be replaced independently. I will discuss below the reasons I think it is very useful to have, in particular, the object library be replaced. We call object space such an object library. The standard object space is the object space that works just like Python's, that is, the object space whose black boxes are real Python objects that work as expected. We will call wrapped objects the black boxes of an object space. 
- -Here are four examples of object spaces: - -- TrivialObjectSpace_ -- StandardObjectSpace_ -- AbstractObjectSpace_ -- AnnotateObjectSpace_ - -(We should also talk about turning the compiler and the main loop into explicit concepts, as Rocco discussed in pypy-dev.) - -I believe that the above examples should give a hint at why we might really want object spaces that behave differently: this is called abstract interpretation in the literature. But there are also more funny ways to use object spaces, for example: - -+ running with several object spaces at once, to clearly separate the data into domains -+ proxy object spaces, which forward all requests over a network to a "Python object server" -+ and more... there have been numerous ideas in pypy-dev. - ------------- - -.. _ObjectSpaceInterface: objspaceinterface.html -.. _TrivialObjectSpace: trivialobjspace.html -.. _StandardObjectSpace: stdobjspace.html -.. _AbstractObjectSpace: abstractobjspace.html -.. _AnnotateObjectSpace: annotateobjspace.html diff --git a/pypy/documentation/objspaceinterface.txt b/pypy/documentation/objspaceinterface.txt deleted file mode 100644 index 5dbf38e34d..0000000000 --- a/pypy/documentation/objspaceinterface.txt +++ /dev/null @@ -1,124 +0,0 @@ -================================ -PyPython ObjectSpaceInterface -================================ - -This is a draft version of the ObjectSpace_ interface. It is still evolving, although the public interface is not evolving as much as the internal interface that subclasses need to know about. This document now generally refers to the public interface; for subclassing you need to poke at the code in detail anyway. - - -class baseobjspace.ObjSpace -=========================== - -Administrative Functions ----------------------------- - -**initialize():** - Function which initializes w_builtins and the other w_constants. - -**getexecutioncontext():** - Return current active execution context. 
- -Operations on Objects in ObjectSpace ------------------------------------------ - -These functions both take and return "wrapped" objects. - -*The following functions implement the same operations as those in CPython:* - -``id, type, issubtype, iter, repr, str, len, hash,`` - -``getattr, setattr, delattr, getitem, setitem, delitem,`` - -``pos, neg, not_, abs, invert, add, sub, mul, truediv, floordiv, div, mod, divmod, pow, lshift, rshift, and_, or_, xor,`` - -``hex, oct, int, float, ord,`` - -``lt, le, eq, ne, gt, ge, contains,`` - -``inplace_add, inplace_sub, inplace_mul, inplace_truediv, inplace_floordiv, inplace_div, inplace_mod, inplace_pow, inplace_lshift, inplace_rshift, inplace_and, inplace_or, inplace_xor,`` - -``get, set, delete`` - -**next(w):** - Call the next function for iterator w, or raises a real NoValue. - -**call(w_callable, w_args, w_kwds):** - Call a function with the given args and keywords. - -**call_function(w_callable, *args_w, **kw_w):** - Convenience function that collects the arguments in a wrapped tuple and dict and invokes 'call()'. - -**is_(w_x, w_y):** - Implements 'w_x is w_y'. (Returns a wrapped result too!) - -**isinstance(w_obj, w_type):** - Implements 'issubtype(type(w_obj), w_type)'. (Returns a wrapped result too!) - -**exception_match(w_exc_type, w_check_class):** - Checks if the given exception type matches 'w_check_class'. Used in matching the actual exception raised with the list of those to catch in an except clause. (Returns a wrapped result too!) - -Creation of Application Level objects ---------------------------------------- - -**wrap(x):** - Returns a wrapped object that is a reference to the interpreter-level object x. This can be used either on simple immutable objects (integers, strings...) to create a new wrapped object, or on complex mutable objects to obtain an application-level-visible reference to them (e.g. instances of internal interpreter classes). 
- -**newbool(b):** - Creates a Bool Object from an interpreter level object. - -**newtuple([..]):** - Take an interpreter level list of wrapped objects. - -**newlist([..]):** - Takes an interpreter level list of wrapped objects. - -**newdict([..]):** - Takes an interpreter level list of interpreter level pairs of wrapped key:wrapped value entries (and NOT an interpreter level dictionary!). - -**newslice(w_start, w_end, w_step):** - Makes a new slice object. - -**newstring(asciilist):** - Creates a string from a list of wrapped integers. - -Conversions from Application Level to Interpreter Level ----------------------------------------------------------- - -**unwrap(w_x):** - Return Interpreter Level equivalent of w_x - -**is_true(w_x):** - Return a interpreter level bool (True or False). - -**unpackiterable(w_iterable, expected_length=None):** - Unpack an iterable object into a real (interpreter level) list. Raise a real ValueError if the expected_length is wrong. - -**unpacktuple(w_tuple, expected_length=None):** - Same as unpackiterable(), but only for tuples. - - -Data Members ------------------ - -+ self.w_builtins -+ self.w_modules -+ self.w_None: The ObjectSpace's None -+ self.w_True: The ObjectSpace's True -+ self.w_False: The ObjectSpace's False -+ self.w_Ellipsis: The ObjectSpace's Ellipsis -+ self.w_NotImplemented: The ObjectSpace's NotImplemented - -+ ObjSpace.MethodTable: - List of tuples (method name, symbol, number of arguments, list of special names) for the regular part of the interface. (Tuples are interpreter level.) - -+ ObjSpace.BuiltinModuleTable: - List of names of built-in modules. - -+ ObjSpace.ConstantTable: - List of names of the constants that the object space should define - -+ ObjSpace.ExceptionTable: - List of names of exception classes. - ---------------------------- - -.. 
_ObjectSpace: objspace.html diff --git a/pypy/documentation/optionaltool.txt b/pypy/documentation/optionaltool.txt deleted file mode 100644 index 678611bdf2..0000000000 --- a/pypy/documentation/optionaltool.txt +++ /dev/null @@ -1,33 +0,0 @@ -Optional Tools for running (and hacking) PyPy
-=============================================
-
-In addition to the latest version of svn and Python, we recommend installing
-some optional tools for working on pypy. They are not required to run all the
-tests or to get an interactive PyPy prompt but they help to understand and
-debug PyPy.
-
-Recommended tools:
-------------------
-
-* graphviz::
-
- (used for visualizing the control-flow)
- http://www.research.att.com/sw/tools/graphviz/download.html
-
-* pygame::
-
- (to visualize control flow and annotation analysis of python programs)
- http://www.pygame.org/download.shtml
-
- - on Mac OS X, try this::
- Alex will reconstruct the process for us
-
-Optional tools:
----------------
-
-* CLISP::
-
- Optional (if you want to work on generating lisp code from pypy)
- http://clisp.cons.org/
-
-
diff --git a/pypy/documentation/restrictedpy.txt b/pypy/documentation/restrictedpy.txt deleted file mode 100644 index 35493133d5..0000000000 --- a/pypy/documentation/restrictedpy.txt +++ /dev/null @@ -1,238 +0,0 @@ -================== -Restricted Python -================== - -We are writing a Python interpreter in Python, using Python's well known ability -to step behind the algorithmic problems as language. At first glance, one might -think this achieves nothing but a better understanding for everybody how the -interpreter works. This alone would make it worth doing, but we have much larger -goals. - - -CPython vs. PyPy -------------------- - -Compared to the CPython implementation, Python takes the role of the C Code. So -actually, we describe something by Python, which has been coded in C already, -with all the restrictions that are implied by C. We are not trying to make the -structures of the CPython interpreter more flexible by rewriting things in C, -but we want to use Python to give an alternative description of the interpreter. - -The clear advantage is that this description is probably shorter and simpler to -read, and many implementation details vanish. The drawback of this approach is -that this interpreter will be unbearably slow. - -To get to a useful interpreter again, we need to apply some mappings to the -implementation, later. One rather straight-forward is to do a whole program -analysis of the PyPy interpreter and create a C source, again. There are many -other ways, but let's stick with the easiest approach, first. - -In order to make a C code generator simple, we restrict ourselves to a subset of -the Python language, and we adhere to some rules, which make code generation -obvious and easy. - - -Restricted Python is Runtime Python -------------------------------------- - -Restricted Python describes a runnable Python interpreter implementation. This -is a quite static object that can be suitably described by RPython. 
But the -restrictions do not apply during the startup phase. - - -PyPy Bootstrap -------------------- - -When the PyPy interpreter is started as a CPython program, it can use all of -CPython for a while, until it reaches runtime. That is, all executable code will -be executed using the full power of Python. - -An example can be found in the implementation, which is quite elegant: For the -definition of all the opcodes of the Python interpreter, the module dis is -imported and used. This saves us from adding extra modules to PyPy. The import -code is run at startup time, and we are allowed to use the CPython builtin -import function. - -When the startup code is done, all resulting objects, functions, code blocks -etc. must adhere to the runtime restrictions. All initialized modules are -written out in a persistent manner. Our current idea is to emit a huge C source -file which contains everything created so far. During this process, a whole -program analysis is performed, which makes use of the restrictions defined in -RPython. This enables the code generator to emit efficient replacements for pure -integer objects, for instance. - - -RPython Definition --------------------- - -It might make sense to define a sub-language of Python called RPython, with the -restrictions depicted below. This is an evolving topic, and we're just -collecting things which come up during trying to code the interpreter, so this -is no language at all, but an arbitrary set of rules, which are about to be -changed all day. - - -Object restrictions -------------------------- - -We are using - -**variables** - - the same variable in the same context can receive values of different types, - at a possible overhead cost. For example, a variable that can contain a - wrapped object or None is efficiently implemented as a PyObject* pointer that - can be NULL, but a variable that can contain either an integer or a float must - be implemented as a union with a type tag in C. 
- -**constants** - - all module globals are considered constants. - -**integer, float, string, boolean** - - avoid string methods and complex operations like slicing with a step - -**tuples** - - no variable-length tuples; use them to store or return pairs or n-tuples of - values - -**lists** - - lists are used as an allocated array; list.append() does naive resizing, so as - far as possible use list comprehensions (see below). list.extend() or the += - operator are allowed and efficient. Unless there is really a use case for it, - repetition is limited to initialization purposes: '[single_value] * length'. - -**dicts** - - dicts with string keys only (preferrably the kind of strings that are usually - interned in CPython, i.e. short strings that look like identifiers). The - implementation could safely decide that all dict keys should be interned. - -**control structures** - - all allowed - -**list comprehensions** - - may be used to create allocated, initialized array. the array size must be - computable in advance, which implies that we don't allow an if clause. - -**functions** - -+ statically called functions may use defaults and a variable number of - arguments (which may be passed as a list instead of a tuple, so write code that - does not depend on it being a tuple). - -+ dynamic dispatch enforces use of very simple signatures, equal for all - functions to be called in that context. At the moment, this occurs in the opcode - dispatch, only. - -**builtin functions** - - A few builtin functions will be used, while this set is not defined - completely, yet. Some builtin functions are special forms: - -**range** - - does not create an array. It is only allowed in for loops. The step argument - must be a constant. - -**len** - -+ may be used with basic types that have a length. But len is a special form - that is recognized by the compiler. 
- -+ If a certain structure is never touched by len, the compiler might save the - length field from the underlying structure. - -``int, float, ord, chr``... are available as simple conversion functions. - -``int, float, str``... have a special meaning as a type inside of isinstance only. - -**classes** - -+ methods and other class attributes do not change after startup -+ inheritance is supported -+ classes are first-class objects too - -**exceptions** - -+ fully supported -+ see below for restrictions on exceptions raised by built-in operations - -**objects** - - wrapped objects are borrowed from the object space. Just like in CPython, code - that needs e.g. a dictionary can use a wrapped dict and the object space - operations on it. - -This layout makes the number of types to take care about quite limited. - - -Example: Integer Types -------------------------- - -While implementing the integer type, I (Chris) stumbled over the problem, that -integers are quite in flux in CPython right now. Depending on the version, -integers either raise an overflow exception or mutate into longs on overflow. -Also, shifting right now truncates (up to 2.3) but is going to extend to longs -as well. In order to enable us to run the restricted Python stuff in CPython, I -needed to use a derived class r_int(int), which always behaves the same: Never -leaving its domain, but always staying an integer. - -The r_int type is implemented in a pervasive way: Every operation that involves -an r_int creates an r_int as the result. Therefore, the implementation is not -cluttered with special type casts. Just the initial value of an emulated -integer's intval field is initialized by obj.intval = r_int(val) . This way, the -r_int type propagates itself through all operations without extra effort of the -programmer. - -This concept looks promising, and since we will need unsigned integers which do -not overflow as well, I also created r_uint. 
It is always a word-sized unsigned -integer and never overflows. This will be a building block for things like -computing hash values, where wrap-around effects are intended and should be -easily coded without lots of explicit mask shuffling. - -Now I think to extend this even more and build a full set of primitive types, -which are intended to - -+ define the semantics of the primitive type -+ give a working implementation for unrestricted Python - -These primitive types can later easily be augmented with methods to emit C code instead of executing. I guess this would be implemented in an extra ObjectSpace. - - -Exception rules ---------------------- - -Exceptions are by default not generated for simple cases.:: - - - #!/usr/bin/python - - x = 5 - x = x + 1 # this code is not checked for overflow - - try: - x = x + y - except OverflowError: - # implement using longs - - -Code with no exception handlers does not raise exceptions. By supplying an -exception handler, you ask for error checking. Without, you assure the system -that the operation cannot overflow. - -Exceptions explicitly raised will always be generated. - - -Testing ------------- - -Besides extra tests which have to be written, PyPy has the advantage that it is -runnable on standard CPython. That means, we can run all of PyPy with all -exception handling enabled, so we might catch cases where we failed to adhere to -our implicit assertions. diff --git a/pypy/documentation/stdobjspace.txt b/pypy/documentation/stdobjspace.txt deleted file mode 100644 index d4b4d2dbee..0000000000 --- a/pypy/documentation/stdobjspace.txt +++ /dev/null @@ -1,28 +0,0 @@ -========================= -StandardObjectSpace -========================= - -The StandardObjectSpace is the direct equivalent of CPython's object library (the "Objects/" subdirectory in the distribution). It is an implementation of the common Python types in a lower-level language. 
- -The StandardObjectSpace defines an abstract parent class, W_Object, and a bunch of subclasses like W_IntObject, W_ListObject, and so on. A wrapped object (a "black box" for the interpreter main loop) is thus an instance of one of these classes. When the main loop invokes an operation, say the addition, between two wrapped objects w1 and w2, the StandardObjectSpace does some internal dispatching (similar to "Object/abstract.c" in CPython) and invokes a method of the proper W_XyzObject class that can do the operation. The operation itself is done with the primitives allowed by RestrictedPython. The result is constructed as a wrapped object again. For example, compare the following implementation of integer addition with the function "int_add()" in "Object/intobject.c": :: - - def int_int_add(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x + y - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer addition")) - return W_IntObject(z) - -Why such a burden just for integer objects? Why did we have to wrap them into W_IntObject instances? For them it seems it would have been sufficient just to use plain Python integers. But this argumentation fails just like it fails for more complex kind of objects. Wrapping them just like everything else is the cleanest solution. You could introduce case testing wherever you use a wrapped object, to know if it is a plain integer or an instance of (a subclass of) W_Object. But that makes the whole program more complicated. The equivalent in CPython would be to use PyObject* pointers all around except when the object is an integer (after all, integers are directly available in C too). You could represent small integers as odd-valuated pointers. But it puts extra burden on the whole C code, so the CPython team avoided it. - -In our case it is a later optimization that we could make. 
We just don't want to make it now (and certainly not hard-coded at this level -- it could be introduced by the C translators that we will eventually write). So in summary: wrapping integers as instances is the simple path, while using plain integers instead is the complex path, not the other way around. - -Note that the current StandardObjectSpace_ implementation uses MultiMethod_ dispatch instead of the complex rules of "Object/abstract.c". I think that this can be translated to a different low-level dispatch implementation that would be binary compatible with CPython's (basically the PyTypeObject structure and its function pointers). If compatibility is not required it will be more straightforwardly converted into some efficient multimethod code. - ---------------------------------------------------------------------------- - -.. _StandardObjectSpace: stdobjspace.html -.. _MultiMethod: multimethod.html diff --git a/pypy/documentation/testdesign.txt b/pypy/documentation/testdesign.txt deleted file mode 100644 index 123d513074..0000000000 --- a/pypy/documentation/testdesign.txt +++ /dev/null @@ -1,138 +0,0 @@ -============= -Test Design -============= - -Our tests are based on the unittest framework. All tests of modules -in a directory usually reside in a subdirectory **test**. There are -basically two types of unit tests: - -- **Interpreter Level tests**. They run at the same level as PyPy's - interpreter. - -- **Application Level tests**. They run at application level which means - that they look like straight python code but they are interpreted by PyPy. - -Both types of tests need an objectspace they can run with (the interpreter -dispatches operations on objects to an objectspace). If you run a test you -can usually give the '-S' or '-T' switch for using the Standard or the Trivial -ObjectSpace respectively. - -Writing a test --------------- - -The best reference is to go to some test files and look how they are done. 
-Almost all tests start with the following lines:: - - import autopath - from pypy.tool import test - -of which the first line determines the package path automatically by searching -for **pypy** among the parents of the test-file's directory. The second -line imports PyPy's test-hook of which ''test.main()'' is the most important -and is usually the last line of the testfile. Further below you will find -some preliminary information about a new test module that was recently -developed at the Amsterdam Sprint (Dec. 2003) and still is under development. - -Note that we are using a unittest2 framework (but not for long) - -Command line tool test_all --------------------------- - -You can run almost all of PyPy's tests by invoking:: - - python test_all.py - -which will run all tests against the Standard Object Space. If you want -to test against the TrivialObjectSpace then issue:: - - python test_all.py -T - -For more switches invoke "python test_all.py -h". - -This used to be reversed -- the TrivialObjectSpace was the default, and -you had to specify the StandardObjectSpace. You may find some documentation -that has not been changed to reflect the new reality. - -------------------------------------------------------------------------------- - -.. _mail: http://codespeak.net/pipermail/pypy-dev/2003q2/000789.html - -New unit testing framework (not ready yet) ------------------------------------------- - -Current implementation -~~~~~~~~~~~~~~~~~~~~~~ - -The following picture is an UML class diagram of the framework. This -diagram is taken from the source code, which can currently be found -at src/pypy/tool/newtest.py as of 2003-12-19. (Most probably, the name -of the file will change, however.) 
- -:: - - +------------+ 1 1 +----------+ - | TestResult |----------------------------------------| TestItem | - | (abstract) | +----------+ - +------------+ | * - A | - - | - | | 1 - +----------------------------+-----------+ +-----------+ - | | | | TestSuite | - +-------------------------+ +---------+ +---------+ +-----------+ - | TestResultWithTraceback | | Success | | Skipped | A - | (abstract) | +---------+ +---------+ | - +-------------------------+ | loaded by - A A | - - - | * - | | +------------+ - | | | TestCase | - +-------+ +---------+ | (abstract) | - | Error | | Failure | +------------+ - +-------+ +---------+ A - - - | - | - concrete test - case classes - -Like the unittest framework of Python, our framework implements -tests as test methods in TestCase classes. Custom test case classes -derive from the shown TestCase class, defined in this module. As in -Python's unittest, a test case class can contain setUp and tearDown -methods. Additionally, it contains a method 'skip' which can be -called to stop a test prematurely. This won't be counted as a failure -or error. - -Test cases are loaded by a TestSuite class via the method init_from_dir. -This method will read all test modules in and below a specified -directory, inspect them for classes derived from TestCase (i. e. *our* -TestCase), and in turn inspect them for test methods (like in -unittest, all methods which start with "test"). - -For every found method, TestSuite will store its module, class and -unbound method objects in a TestItem object. There are also other -properties stored, e. g. the source code for each method and its -docstring. - -When the TestSuite's method 'run' is called, all collected TestItems -are run and, according to the outcome of the test, a TestResult object -is generated which holds a reference to "its" TestItem object. - -The TestResult classes Success and Skipped denote a passed test. 
A -skipped test means that not all test code has been run (and no error -or failure occurred before the skip method was called). If a test -fails, resulting in a Failure object, a test, e. g. tested with -assertEqual, has failed. An Error object is generated if something -else causes an unforeseen exception to be raised. - -Outlook -~~~~~~~ - -To be directly usable for the PyPy project, the selection of specific -object spaces has to be implemented. Moreover, the existing test -modules have to be converted. Because the TestCase class interface -remains compatible, this should only involve changes which can be -performed automatically, that is, changing the import statement for -this test module and the base class of test cases. - diff --git a/pypy/documentation/trivialobjspace.txt b/pypy/documentation/trivialobjspace.txt deleted file mode 100644 index b85488aa5b..0000000000 --- a/pypy/documentation/trivialobjspace.txt +++ /dev/null @@ -1,17 +0,0 @@ -================================= -PyPython Trivial Object Space -================================= - -The easiest way to implement an Object Space is to represent a Python object with itself. - -A PyPy interpreter using the Trivial Object Space is an interpreter with its own main loop (written in Python), but this main loop manipulates real Python objects and all operations are done directly on the Python objects. For example, "1" really means "1" and when the interpreter encounters the BINARY_ADD bytecode instructions the TrivialObjectSpace will just add two real Python objects together using Python's "+". The same for lists, dictionaries, classes... We just use Python's own. - -Of course you cannot do this if the goal is to write PyPy in such a way that it doesn't rely on a real underlying Python implementation like CPython. 
Still, it works, and it has its own uses, like testing our interpreter, or even interpreting a different kind of bytecode -- for example, it could probably be used to emulate generators in any Python version. (That would be quite slow, however.) - -(This is already done; it is funny to watch "dis.dis" disassembling itself painfully slowly :-) ) - -See StandardObjectSpace_ for the rest of the story. - -------------------------------------------------------------------- - -.. _StandardObjectSpace: stdobjspace.html diff --git a/pypy/documentation/wrapping.txt b/pypy/documentation/wrapping.txt deleted file mode 100644 index 94e015fdad..0000000000 --- a/pypy/documentation/wrapping.txt +++ /dev/null @@ -1,95 +0,0 @@ -========================= - Wrapping rules -========================= - -Introduction ------------- - -PyPy is made of Python source code at two levels: there is on the one hand *application-level code* that looks like normal Python code, and that implements some functionalities as one would expect from Python code (e.g. one can give a pure Python implementation of some built-in functions like ``zip()``). There is also *interpreter-level code* for the functionalities that must more directly manipulate interpreter data and objects (e.g. the main loop of the interpreter, and the various object spaces). - -Application-level code doesn't see object spaces explicitely: it runs using an object space to support the objects it manipulates, but this is implicit. There is no need for particular conventions for application-level code. The sequel is only about interpreter-level code. (Ideally, no application-level variable should be called ``space`` or ``w_xxx`` to avoid confusion.) - - -Naming conventions ------------------- - -* ``space``: the object space is only visible at interpreter-level, where it is by convention in a variable called ``space``. - -* ``w_xxx``: any object seen by application-level code is an object explicitely managed by the object space. 
From the interpreter-level point of view, this is called a *wrapped* object. The ``w_`` prefix is used for any type of application-level object. - -* ``xxx_w``: an interpreter-level container for wrapped objects, for example a list or a dict containing wrapped objects. Not to be confused with a wrapped object that would be a list or a dict: these are normal wrapped objects, so they use the ``w_`` prefix. - - -Operations on ``w_xxx`` ------------------------ - -The core interpreter considers wrapped objects as black boxes. It is not allowed to inspect them directly. The allowed operations are all dependent on the object space: they are called ``space.xxx()``, where ``xxx`` is a standard operation name (``add``, ``getattr``, ``call``, ``eq``...). The list of standard operations is found in the large table near the end of ``pypy.interpreter.baseobjspace``. These operations take wrapped arguments and return a wrapped result (or sometimes just None). - -Also note some helpers: - -* ``space.call_function(w_callable, ...)``: collects the given (already-wrapped) arguments, builds a wrapped tuple for them, and uses ``space.call()`` to perform the call. - -* ``space.call_method(w_object, 'method', ...)``: uses ``space.getattr()`` to get the method object, and then ``space.call_function()`` to invoke it. - - -Building ``w_xxx`` objects --------------------------- - -From the core interpreter, wrapped objects are usually built as the result of an object space operation. The ways to directly create a wrapped object are: - -* ``space.wrap(x)``: returns a wrapped object containing the value ``x``. Only works if ``x`` is either a simple value (integer, float, string) or an instance of an internal interpreter class (Function, Code, Frame...). - -* ``space.newlist([w_x, w_y, w_z...])``: returns a wrapped list from a list of already-wrapped objects. - -* ``space.newtuple([w_x, w_y, w_z...])``: returns a wrapped tuple from a list of already-wrapped objects. 
- -* ``space.newdict([])``: returns a new, empty wrapped dictionary. (The argument list can contain tuples ``(w_key, w_value)`` but it seems that such a use is not common.) - -* ``space.newbool(x)``: returns ``space.w_False`` or ``space.w_True`` depending on the truth value of ``x``. - -There are a few less common constructors, described in the comments at the end of ``pypy.interpreter.baseobjspace``. - - -Constant ``w_xxx`` objects --------------------------- - -The object space holds a number of predefined wrapped objects. The most common ones are ``space.w_None`` and ``space.w_XxxError`` for each exception class ``XxxError`` (e.g. ``space.w_KeyError``, ``space.w_IndexError``, etc.). - - -Inspecting ``w_xxx`` objects ----------------------------- - -The most delicate operation is for the interpreter to inspect a wrapped object, which must be done via the object space. - -* ``space.is_true(w_x)``: checks if the given wrapped object is considered to be ``True`` or ``False``. You must never use the truth-value of ``w_x`` directly; doing so (e.g. writing ``if w_x:``) will give you an error reminding you of the problem. - -* ``w_x == w_y`` or ``w_x is w_y``: DON'T DO THAT. The only half-official exception is to check if ``w_x`` contains a wrapped ``None``: you can write ``w_x == space.w_None``. Follow this rule; the case of ``None`` is easy to fix globally later if we find out that we need to. The rationale for this rule is that there is no reason that two wrappers are related in any way even if they contain what looks like the same object at application-level. To check for equality, use ``space.is_true(space.eq(w_x, w_y))``. To check for identity, use ``space.is_true(space.is_(w_x, w_y))``. - -* ``space.unpackiterable(w_x)``: this helper iterates ``w_x`` (using ``space.iter()`` and ``space.next()``) and collects the resulting wrapped objects in a list. 
Of course, in cases where iterating directly is better than collecting the elements in a list first, you should use ``space.iter()`` and ``space.next()`` directly. - -* ``space.unwrap(w_x)``: inverse of ``space.wrap()``. Attention! Using ``space.unwrap()`` must be avoided whenever possible. Actually, it should only be used when you are sure that ``w_x`` contains a simple object or an internal interpreter object that was wrapped with ``space.wrap()``. This function is a bit buggy anyway, and it should probably be fixed at some point: we should specify what kind of unwrapped result is expected (and raise a TypeError appropriately). - -Remember that you can usually obtain the information you want by invoking operations or methods on the wrapped objects; e.g. ``space.call_method(w_dict, 'iterkeys')`` returns a wrapped iterable that you can decode with ``space.unpackiterable()``. - - -Application-level exceptions ----------------------------- - -Interpreter-level code can use exceptions freely. However, all application-level exceptions are represented as an ``OperationError`` at interpreter-level. In other words, all exceptions that are potentially visible at application-level are internally an ``OperationError``. This is the case of all errors reported by the object space operations (``space.add()`` etc.). - -To raise an application-level exception:: - - raise OperationError(space.w_XxxError, space.wrap("message")) - -To catch a specific application-level exception:: - - try: - ... - except OperationError, e: - if not e.match(space, space.w_XxxError): - raise - ... - -This construct catches all application-level exceptions, so we have to match it against the particular ``w_XxxError`` we are interested in and re-raise other exceptions. The exception instance ``e`` holds two attributes that you can inspect: ``e.w_type`` and ``e.w_value``. Do not use ``e.w_type`` to match an exception, as this will miss exceptions that are instances of subclasses. 
- -We are thinking about replacing ``OperationError`` with a family of common exception classes (e.g. ``AppKeyError``, ``AppIndexError``...) so that we can more easily catch them. The generic ``AppError`` would stand for all other application-level classes. diff --git a/pypy/interpreter/__init__.py b/pypy/interpreter/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/interpreter/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py deleted file mode 100644 index 35d5d25381..0000000000 --- a/pypy/interpreter/argument.py +++ /dev/null @@ -1,290 +0,0 @@ -""" -Arguments objects. -""" - -from pypy.interpreter.error import OperationError - - -class Arguments: - """ - Collects the arguments of a function call. - - Instances should be considered immutable. - """ - - ### Construction ### - - blind_arguments = 0 - - def __init__(self, space, args_w=[], kwds_w={}, - w_stararg=None, w_starstararg=None): - self.space = space - self.arguments_w = list(args_w) - self.kwds_w = kwds_w.copy() - self.w_stararg = w_stararg - if w_starstararg is not None: - # unlike the * argument we unpack the ** argument immediately. - # maybe we could allow general mappings? 
- if not space.is_true(space.isinstance(w_starstararg, space.w_dict)): - raise OperationError(space.w_TypeError, - space.wrap("the keywords must be " - "a dictionary")) - for w_key in space.unpackiterable(w_starstararg): - key = space.unwrap(w_key) - if not isinstance(key, str): - raise OperationError(space.w_TypeError, - space.wrap("keywords must be strings")) - if key in self.kwds_w: - raise OperationError(self.space.w_TypeError, - self.space.wrap("got multiple values " - "for keyword argument " - "'%s'" % key)) - self.kwds_w[key] = space.getitem(w_starstararg, w_key) - - def frompacked(space, w_args=None, w_kwds=None): - """Convenience static method to build an Arguments - from a wrapped sequence and a wrapped dictionary.""" - return Arguments(space, w_stararg=w_args, w_starstararg=w_kwds) - frompacked = staticmethod(frompacked) - - def __repr__(self): - if self.w_stararg is None: - if not self.kwds_w: - return 'Arguments(%s)' % (self.arguments_w,) - else: - return 'Arguments(%s, %s)' % (self.arguments_w, self.kwds_w) - else: - return 'Arguments(%s, %s, %s)' % (self.arguments_w, - self.kwds_w, - self.w_stararg) - - ### Manipulation ### - - def unpack(self): - "Return a ([w1,w2...], {'kw':w3...}) pair." - if self.w_stararg is not None: - self.arguments_w += self.space.unpackiterable(self.w_stararg) - self.w_stararg = None - return self.arguments_w, self.kwds_w - - def prepend(self, w_firstarg): - "Return a new Arguments with a new argument inserted first." 
- args = Arguments(self.space, [w_firstarg] + self.arguments_w, - self.kwds_w, self.w_stararg) - args.blind_arguments = self.blind_arguments + 1 - return args - - def fixedunpack(self, argcount): - """The simplest argument parsing: get the 'argcount' arguments, - or raise a real ValueError if the length is wrong.""" - if self.kwds_w: - raise ValueError, "no keyword arguments expected" - if len(self.arguments_w) > argcount: - raise ValueError, "too many arguments (%d expected)" % argcount - if self.w_stararg is not None: - self.arguments_w += self.space.unpackiterable(self.w_stararg, - argcount - len(self.arguments_w)) - self.w_stararg = None - elif len(self.arguments_w) < argcount: - raise ValueError, "not enough arguments (%d expected)" % argcount - return self.arguments_w - - def firstarg(self): - "Return the first argument for inspection." - if self.arguments_w: - return self.arguments_w[0] - if self.w_stararg is None: - return None - w_iter = self.space.iter(self.w_stararg) - try: - return self.space.next(w_iter) - except OperationError, e: - if not e.match(self.space, self.space.w_StopIteration): - raise - return None - - ### Parsing for function calls ### - - def parse(self, fnname, signature, defaults_w=[]): - """Parse args and kwargs to initialize a frame - according to the signature of code object. - """ - space = self.space - # If w_stararg is not exactly a tuple, unpack it now: - # self.match_signature() assumes that it can use it directly for - # a matching *arg in the callee's signature. - if self.w_stararg is not None: - if not space.is_true(space.is_(space.type(self.w_stararg), - space.w_tuple)): - self.unpack() - try: - return self.match_signature(signature, defaults_w) - except ArgErr, e: - raise OperationError(space.w_TypeError, - space.wrap(e.getmsg(self, fnname))) - - def match_signature(self, signature, defaults_w=[]): - """Parse args and kwargs according to the signature of a code object, - or raise ArgumentMatch in case of failure. 
- """ - argnames, varargname, kwargname = signature - # - # args_w = list of the normal actual parameters, wrapped - # kwds_w = real dictionary {'keyword': wrapped parameter} - # argnames = list of formal parameter names - # scope_w = resulting list of wrapped values - # - co_argcount = len(argnames) # expected formal arguments, without */** - if self.w_stararg is not None: - # There is a case where we don't have to unpack() a w_stararg: - # if it matches exactly a *arg in the signature. - if len(self.arguments_w) == co_argcount and varargname is not None: - pass - else: - self.unpack() # sets self.w_stararg to None - args_w = self.arguments_w - kwds_w = self.kwds_w - - # put as many positional input arguments into place as available - scope_w = args_w[:co_argcount] - input_argcount = len(scope_w) - - # check that no keyword argument conflicts with these - # note that for this purpose we ignore the first blind_arguments, - # which were put into place by prepend(). This way, keywords do - # not conflict with the hidden extra argument bound by methods. 
- if kwds_w: - for name in argnames[self.blind_arguments:input_argcount]: - if name in kwds_w: - raise ArgErrMultipleValues(name) - - remainingkwds_w = kwds_w.copy() - if input_argcount < co_argcount: - # not enough args, fill in kwargs or defaults if exists - def_first = co_argcount - len(defaults_w) - for i in range(input_argcount, co_argcount): - name = argnames[i] - if name in remainingkwds_w: - scope_w.append(remainingkwds_w[name]) - del remainingkwds_w[name] - elif i >= def_first: - scope_w.append(defaults_w[i-def_first]) - else: - raise ArgErrCount(signature, defaults_w, False) - - # collect extra positional arguments into the *vararg - if varargname is not None: - if self.w_stararg is None: # common case - scope_w.append(self.space.newtuple(args_w[co_argcount:])) - else: # shortcut for the non-unpack() case above - scope_w.append(self.w_stararg) - elif len(args_w) > co_argcount: - raise ArgErrCount(signature, defaults_w, True) - - # collect extra keyword arguments into the **kwarg - if kwargname is not None: - w_kwds = self.space.newdict([]) - for key, w_value in remainingkwds_w.items(): - self.space.setitem(w_kwds, self.space.wrap(key), w_value) - scope_w.append(w_kwds) - elif remainingkwds_w: - raise ArgErrUnknownKwds(remainingkwds_w) - return scope_w - - ### Argument <-> list of w_objects together with "shape" information - - def flatten(self): - shape_cnt = len(self.arguments_w) # Number of positional args - shape_keys = self.kwds_w.keys() # List of keywords (strings) - shape_star = self.w_stararg is not None # Flag: presence of *arg - data_w = self.arguments_w + [self.kwds_w[key] for key in shape_keys] - if shape_star: - data_w.append(self.w_stararg) - return (shape_cnt, tuple(shape_keys), shape_star), data_w - - def fromshape(space, (shape_cnt, shape_keys, shape_star), data_w): - args_w = data_w[:shape_cnt] - kwds_w = {} - for i in range(len(shape_keys)): - kwds_w[shape_keys[i]] = data_w[shape_cnt+i] - if shape_star: - w_star = data_w[-1] - else: - 
w_star = None - return Arguments(space, args_w, kwds_w, w_star) - fromshape = staticmethod(fromshape) - # XXX the "shape" tuple should be considered as a black box from - # other code, but translator/genc.h examines it. - - -# -# ArgErr family of exceptions raised in case of argument mismatch. -# We try to give error messages following CPython's, which are very informative. -# - -class ArgErr(Exception): - pass - -class ArgErrCount(ArgErr): - def getmsg(self, args, fnname): - signature, defaults_w, too_many = self.args # from self.__init__() - argnames, varargname, kwargname = signature - args_w, kwds_w = args.unpack() - nargs = len(args_w) - n = len(argnames) - if n == 0: - if kwargname is not None: - msg2 = "non-keyword " - else: - msg2 = "" - nargs += len(kwds_w) - msg = "%s() takes no %sargument (%d given)" % ( - fnname, - msg2, - nargs) - else: - defcount = len(defaults_w) - if defcount == 0 and varargname is None: - msg1 = "exactly" - elif too_many: - msg1 = "at most" - else: - msg1 = "at least" - n -= defcount - if kwargname is not None: - msg2 = "non-keyword " - else: - msg2 = "" - if n == 1: - plural = "" - else: - plural = "s" - msg = "%s() takes %s %d %sargument%s (%d given)" % ( - fnname, - msg1, - n, - msg2, - plural, - nargs) - return msg - -class ArgErrMultipleValues(ArgErr): - def getmsg(self, args, fnname): - argname, = self.args # from self.__init__() - msg = "%s() got multiple values for keyword argument '%s'" % ( - fnname, - argname) - return msg - -class ArgErrUnknownKwds(ArgErr): - def getmsg(self, args, fnname): - kwds_w, = self.args # from self.__init__() - if len(kwds_w) == 1: - msg = "%s() got an unexpected keyword argument '%s'" % ( - fnname, - kwds_w.keys()[0]) - else: - msg = "%s() got %d unexpected keyword arguments" % ( - fnname, - len(kwds_w)) - return msg diff --git a/pypy/interpreter/autopath.py b/pypy/interpreter/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/interpreter/autopath.py +++ /dev/null @@ 
-1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' 
in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py deleted file mode 100644 index 8c0c6d4f2c..0000000000 --- a/pypy/interpreter/baseobjspace.py +++ /dev/null @@ -1,371 +0,0 @@ -from pypy.interpreter.executioncontext import ExecutionContext -from pypy.interpreter.error import OperationError -from pypy.interpreter.miscutils import getthreadlocals -from 
pypy.interpreter.argument import Arguments -from pypy.tool.cache import Cache - -__all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'BaseWrappable', - 'W_Root'] - - -class W_Root: - """This is the abstract root class of all wrapped objects that live - in a 'normal' object space like StdObjSpace.""" - def getdict(self): - return None - def getclass(self, space): - return space.gettypeobject(self.typedef) - -class BaseWrappable(W_Root): - """A subclass of BaseWrappable is an internal, interpreter-level class - that can nevertheless be exposed at application-level by space.wrap().""" - def __spacebind__(self, space): - return self - -class Wrappable(BaseWrappable, object): - """Same as BaseWrappable, just new-style instead.""" - -class ObjSpace(object): - """Base class for the interpreter-level implementations of object spaces. - http://codespeak.net/moin/pypy/moin.cgi/ObjectSpace""" - - full_exceptions = True # full support for exceptions (normalization & more) - - def __init__(self): - "NOT_RPYTHON: Basic initialization of objects." - self._gatewaycache = Cache() - # sets all the internal descriptors - self.initialize() - - def __repr__(self): - return self.__class__.__name__ - - def make_builtins(self, for_builtins): - "NOT_RPYTHON: only for initializing the space." - # initializing builtins may require creating a frame which in - # turn already accesses space.w_builtins, provide a dummy one ... - self.w_builtins = self.newdict([]) - - # insert stuff into the newly-made builtins - for key, w_value in for_builtins.items(): - self.setitem(self.w_builtins, self.wrap(key), w_value) - - assert not hasattr(self, 'builtin') - if not hasattr(self, 'sys'): - self.make_sys() - - from pypy.interpreter.extmodule import BuiltinModule - - # the builtins are iteratively initialized - self.builtin = BuiltinModule(self, '__builtin__', self.w_builtins) - self.w_builtin = self.wrap(self.builtin) - - # initialize with "bootstrap types" from objspace (e.g. 
w_None) - for name, value in self.__dict__.items(): - if name.startswith('w_'): - name = name[2:] - if name.startswith('builtin') or name.startswith('sys'): - continue - #print "setitem: space instance %-20s into builtins" % name - self.setitem(self.w_builtins, self.wrap(name), value) - - self.sys.setbuiltinmodule(self.w_builtin, '__builtin__') - - def make_sys(self): - "NOT_RPYTHON: only for initializing the space." - from pypy.interpreter.extmodule import BuiltinModule - assert not hasattr(self, 'sys') - self.sys = BuiltinModule(self, 'sys') - self.w_sys = self.wrap(self.sys) - self.sys.setbuiltinmodule(self.w_sys, 'sys') - - def get_builtin_module(self, name): - if name not in self.sys.builtin_modules: - return None - module = self.sys.builtin_modules[name] - if module is None: - from pypy.interpreter.extmodule import BuiltinModule - module = BuiltinModule(self, name) - self.sys.builtin_modules[name] = module - w_module = self.wrap(module) - self.sys.setbuiltinmodule(w_module, name) - return w_module - - def initialize(self): - """NOT_RPYTHON: Abstract method that should put some minimal - content into the w_builtins.""" - - def loadfromcache(self, key, builder, cache): - return cache.getorbuild(key, builder, self) - loadfromcache._specialize_ = "location" - - def getexecutioncontext(self): - "Return what we consider to be the active execution context." - ec = getthreadlocals().executioncontext - if ec is None: - ec = self.createexecutioncontext() - return ec - - def createexecutioncontext(self): - "Factory function for execution contexts." - return ExecutionContext(self) - - # Following is a friendly interface to common object space operations - # that can be defined in term of more primitive ones. Subclasses - # may also override specific functions for performance. - - #def is_(self, w_x, w_y): -- not really useful. Must be subclassed - # "'x is y'." 
- # w_id_x = self.id(w_x) - # w_id_y = self.id(w_y) - # return self.eq(w_id_x, w_id_y) - - def not_(self, w_obj): - return self.wrap(not self.is_true(w_obj)) - - def unwrapdefault(self, w_value, default): - if w_value is None or w_value == self.w_None: - return default - else: - return self.unwrap(w_value) - - def newbool(self, b): - if b: - return self.w_True - else: - return self.w_False - - def unpackiterable(self, w_iterable, expected_length=None): - """Unpack an iterable object into a real (interpreter-level) list. - Raise a real ValueError if the length is wrong.""" - w_iterator = self.iter(w_iterable) - items = [] - while True: - try: - w_item = self.next(w_iterator) - except OperationError, e: - if not e.match(self, self.w_StopIteration): - raise - break # done - if expected_length is not None and len(items) == expected_length: - raise ValueError, "too many values to unpack" - items.append(w_item) - if expected_length is not None and len(items) < expected_length: - i = len(items) - if i == 1: - plural = "" - else: - plural = "s" - raise ValueError, "need more than %d value%s to unpack" % (i, plural) - return items - - def unpacktuple(self, w_tuple, expected_length=None): - """Same as unpackiterable(), but only for tuples. - Only use for bootstrapping or performance reasons.""" - tuple_length = self.unwrap(self.len(w_tuple)) - if expected_length is not None and tuple_length != expected_length: - raise ValueError, "got a tuple of length %d instead of %d" % ( - tuple_length, expected_length) - items = [ - self.getitem(w_tuple, self.wrap(i)) for i in range(tuple_length)] - return items - - def exception_match(self, w_exc_type, w_check_class): - """Checks if the given exception type matches 'w_check_class'.""" - check_list = [w_check_class] - while check_list: - w_item = check_list.pop() - # Match identical items. - if self.is_true(self.is_(w_exc_type, w_item)): - return True - try: - # Match subclasses. 
- if self.is_true(self.issubtype(w_exc_type, w_item)): - return True - except OperationError: - # Assume that this is a TypeError: w_item not a type, - # and assume that w_item is then actually a tuple. - try: - exclst = self.unpackiterable(w_item) - except OperationError: - # hum, maybe it is not a tuple after all, and w_exc_type - # was not a type at all (string exceptions). Give up. - continue - check_list.extend(exclst) - return False - - def call(self, w_callable, w_args, w_kwds=None): - args = Arguments.frompacked(self, w_args, w_kwds) - return self.call_args(w_callable, args) - - def call_function(self, w_func, *args_w): - args = Arguments(self, list(args_w)) - return self.call_args(w_func, args) - - def call_method(self, w_obj, methname, *arg_w): - w_meth = self.getattr(w_obj, self.wrap(methname)) - return self.call_function(w_meth, *arg_w) - - def isinstance(self, w_obj, w_type): - w_objtype = self.type(w_obj) - return self.issubtype(w_objtype, w_type) - - def eval(self, expression, w_globals, w_locals): - "NOT_RPYTHON: For internal debugging." - import types - from pypy.interpreter.pycode import PyCode - if isinstance(expression, str): - expression = compile(expression, '?', 'eval') - if isinstance(expression, types.CodeType): - expression = PyCode()._from_code(expression) - if not isinstance(expression, PyCode): - raise TypeError, 'space.eval(): expected a string, code or PyCode object' - return expression.exec_code(self, w_globals, w_locals) - - def exec_(self, statement, w_globals, w_locals): - "NOT_RPYTHON: For internal debugging." 
- import types - from pypy.interpreter.pycode import PyCode - if isinstance(statement, str): - statement = compile(statement, '?', 'exec') - if isinstance(statement, types.CodeType): - statement = PyCode()._from_code(statement) - if not isinstance(statement, PyCode): - raise TypeError, 'space.exec_(): expected a string, code or PyCode object' - return statement.exec_code(self, w_globals, w_locals) - - -## Table describing the regular part of the interface of object spaces, -## namely all methods which only take w_ arguments and return a w_ result -## (if any). XXX Maybe we should say that these methods must be accessed -## as 'space.op.xxx()' instead of directly 'space.xxx()'. - -ObjSpace.MethodTable = [ -# method name # symbol # number of arguments # special method name(s) - ('is_', 'is', 2, []), - ('id', 'id', 1, []), - ('type', 'type', 1, []), - ('issubtype', 'issubtype', 2, []), # not for old-style classes - ('repr', 'repr', 1, ['__repr__']), - ('str', 'str', 1, ['__str__']), - ('len', 'len', 1, ['__len__']), - ('hash', 'hash', 1, ['__hash__']), - ('getattr', 'getattr', 2, ['__getattribute__']), - ('setattr', 'setattr', 3, ['__setattr__']), - ('delattr', 'delattr', 2, ['__delattr__']), - ('getitem', 'getitem', 2, ['__getitem__']), - ('setitem', 'setitem', 3, ['__setitem__']), - ('delitem', 'delitem', 2, ['__delitem__']), - ('pos', 'pos', 1, ['__pos__']), - ('neg', 'neg', 1, ['__neg__']), - ('nonzero', 'truth', 1, ['__nonzero__']), - ('abs' , 'abs', 1, ['__abs__']), - ('hex', 'hex', 1, ['__hex__']), - ('oct', 'oct', 1, ['__oct__']), - ('round', 'round', 2, []), - ('ord', 'ord', 1, []), - ('invert', '~', 1, ['__invert__']), - ('add', '+', 2, ['__add__', '__radd__']), - ('sub', '-', 2, ['__sub__', '__rsub__']), - ('mul', '*', 2, ['__mul__', '__rmul__']), - ('truediv', '/', 2, ['__truediv__', '__rtruediv__']), - ('floordiv', '//', 2, ['__floordiv__', '__rfloordiv__']), - ('div', 'div', 2, ['__div__', '__rdiv__']), - ('mod', '%', 2, ['__mod__', '__rmod__']), - 
('divmod', 'divmod', 2, ['__divmod__', '__rdivmod__']), - ('pow', '**', 3, ['__pow__', '__rpow__']), - ('lshift', '<<', 2, ['__lshift__', '__rlshift__']), - ('rshift', '>>', 2, ['__rshift__', '__rrshift__']), - ('and_', '&', 2, ['__and__', '__rand__']), - ('or_', '|', 2, ['__or__', '__ror__']), - ('xor', '^', 2, ['__xor__', '__rxor__']), - ('int', 'int', 1, ['__int__']), - ('float', 'float', 1, ['__float__']), - ('long', 'long', 1, ['__long__']), - ('inplace_add', '+=', 2, ['__iadd__']), - ('inplace_sub', '-=', 2, ['__isub__']), - ('inplace_mul', '*=', 2, ['__imul__']), - ('inplace_truediv', '/=', 2, ['__itruediv__']), - ('inplace_floordiv','//=', 2, ['__ifloordiv__']), - ('inplace_div', 'div=', 2, ['__idiv__']), - ('inplace_mod', '%=', 2, ['__imod__']), - ('inplace_pow', '**=', 2, ['__ipow__']), - ('inplace_lshift', '<<=', 2, ['__ilshift__']), - ('inplace_rshift', '>>=', 2, ['__irshift__']), - ('inplace_and', '&=', 2, ['__iand__']), - ('inplace_or', '|=', 2, ['__ior__']), - ('inplace_xor', '^=', 2, ['__ixor__']), - ('lt', '<', 2, ['__lt__', '__gt__']), - ('le', '<=', 2, ['__le__', '__ge__']), - ('eq', '==', 2, ['__eq__', '__eq__']), - ('ne', '!=', 2, ['__ne__', '__ne__']), - ('gt', '>', 2, ['__gt__', '__lt__']), - ('ge', '>=', 2, ['__ge__', '__le__']), - ('contains', 'contains', 2, ['__contains__']), - ('iter', 'iter', 1, ['__iter__']), - ('next', 'next', 1, ['next']), -# ('call', 'call', 3, ['__call__']), - ('get', 'get', 3, ['__get__']), - ('set', 'set', 3, ['__set__']), - ('delete', 'delete', 2, ['__delete__']), - ] - -ObjSpace.BuiltinModuleTable = [ - '__builtin__', - 'sys', - ] - -ObjSpace.ConstantTable = [ - 'None', - 'False', - 'True', - 'Ellipsis', - 'NotImplemented', - ] - -ObjSpace.ExceptionTable = [ - 'ArithmeticError', - 'AssertionError', - 'AttributeError', - 'EOFError', - 'EnvironmentError', - 'Exception', - 'FloatingPointError', - 'IOError', - 'ImportError', - 'IndentationError', - 'IndexError', - 'KeyError', - 'KeyboardInterrupt', - 'LookupError', 
- 'MemoryError', - 'NameError', - 'NotImplementedError', - 'OSError', - 'OverflowError', - 'ReferenceError', - 'RuntimeError', - 'StandardError', - 'StopIteration', - 'SyntaxError', - 'SystemError', - 'SystemExit', - 'TabError', - 'TypeError', - 'UnboundLocalError', - 'UnicodeError', - 'ValueError', - 'ZeroDivisionError', - ] - -## Irregular part of the interface: -# -# wrap(x) -> w_x -# unwrap(w_x) -> x -# is_true(w_x) -> True or False -# newtuple([w_1, w_2,...]) -> w_tuple -# newlist([w_1, w_2,...]) -> w_list -# newstring([w_1, w_2,...]) -> w_string from ascii numbers (bytes) -# newdict([(w_key,w_value),...]) -> w_dict -#newslice(w_start,w_stop,w_step) -> w_slice (any argument may be a real None) -# call_args(w_obj,Arguments()) -> w_result diff --git a/pypy/interpreter/debug.py b/pypy/interpreter/debug.py deleted file mode 100644 index 4927cd90a6..0000000000 --- a/pypy/interpreter/debug.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -PyPy-oriented interface to pdb. -""" - -import pdb - -def fire(operationerr): - if not operationerr.debug_excs: - return - exc, val, tb = operationerr.debug_excs[-1] - pdb.post_mortem(tb) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py deleted file mode 100644 index 3c3dd37055..0000000000 --- a/pypy/interpreter/error.py +++ /dev/null @@ -1,171 +0,0 @@ -import os, sys - -AUTO_DEBUG = os.getenv('PYPY_DEBUG') - - -class PyPyError(Exception): - "Raise this when you encounter an exceptional situation in PyPy itself." - def __init__(self, space, operationerr): - self.space = space - self.operationerr = operationerr - - -class OperationError(Exception): - """Interpreter-level exception that signals an exception that should be - sent to the application level. 
- - OperationError instances have three public attributes (and no .args), - w_type, w_value and application_traceback, which contain the wrapped - type and value describing the exception, and the unwrapped list of - (frame, instruction_position) making the application-level traceback. - """ - - def __init__(self, w_type, w_value, tb=None): - assert w_type is not None, w_value - self.w_type = w_type - self.w_value = w_value - self.application_traceback = tb - self.debug_excs = [] - - def match(self, space, w_check_class): - "Check if this application-level exception matches 'w_check_class'." - return space.exception_match(self.w_type, w_check_class) - - def __str__(self): - "NOT_RPYTHON: Convenience for tracebacks." - return '[%s: %s]' % (self.w_type, self.w_value) - - def errorstr(self, space): - "NOT_RPYTHON: The exception class and value, as a string." - exc_type = space.unwrap( - space.getattr(self.w_type, space.wrap('__name__'))) - exc_value = space.unwrap(space.str(self.w_value)) - return '%s: %s' % (exc_type, exc_value) - - def getframe(self): - "The frame this exception was raised in, or None." - if self.application_traceback: - return self.application_traceback.frame - else: - return None - - def record_interpreter_traceback(self): - """NOT_RPYTHON: Records the current traceback inside the interpreter. - This traceback is only useful to debug the interpreter, not the - application.""" - self.debug_excs.append(sys.exc_info()) - - def print_application_traceback(self, space, file=None): - "NOT_RPYTHON: Dump a standard application-level traceback." 
- if file is None: file = sys.stderr - self.print_app_tb_only(file) - print >> file, self.errorstr(space) - - def print_app_tb_only(self, file): - "NOT_RPYTHON" - tb = self.application_traceback - if tb: - import linecache - print >> file, "Traceback (application-level):" - while tb is not None: - co = tb.frame.code - lineno = tb.lineno - fname = co.co_filename - if fname.startswith('<inline>\n'): - lines = fname.split('\n') - fname = lines[0].strip() - try: - l = lines[lineno] - except IndexError: - l = '' - else: - l = linecache.getline(fname, lineno) - print >> file, " File \"%s\"," % fname, - print >> file, "line", lineno, "in", co.co_name - if l: - if l.endswith('\n'): - l = l[:-1] - print >> file, l - tb = tb.next - - def print_detailed_traceback(self, space=None, file=None): - """NOT_RPYTHON: Dump a nice detailed interpreter- and - application-level traceback, useful to debug the interpreter.""" - if file is None: file = sys.stderr - for i in range(len(self.debug_excs)-1, -1, -1): - import traceback - interpr_file = LinePrefixer(file, '||') - print >> interpr_file, "Traceback (interpreter-level):" - traceback.print_tb(self.debug_excs[i][2], file=interpr_file) - if self.debug_excs: - from pypy.tool import tb_server - tb_server.publish_exc(self.debug_excs[-1]) - self.print_app_tb_only(file) - if space is None: - exc_typename = str(self.w_type) - exc_value = self.w_value - else: - w = space.wrap - if space.is_true(space.is_(space.type(self.w_type), space.w_str)): - exc_typename = space.unwrap(self.w_type) - else: - exc_typename = space.unwrap( - space.getattr(self.w_type, w('__name__'))) - if self.w_value == space.w_None: - exc_value = None - else: - exc_value = space.unwrap(space.str(self.w_value)) - print >> file, '(application-level)', - if not exc_value: - print >> file, exc_typename - else: - print >> file, exc_typename+':', exc_value - if AUTO_DEBUG: - import debug - debug.fire(self) - - -# Utilities - -def inlinecompile(source, space, symbol='exec'): - 
"""Compile the given 'source' string. - This function differs from the built-in compile() because it abuses - co_filename to store a copy of the complete source code. - This lets OperationError.print_application_traceback() print the - actual source line in the traceback.""" - compile = space.builtin.compile - w = space.wrap - return compile(w(source), w('<inline>\n%s'%source), w(symbol), w(0), w(0)) - - -class LinePrefixer: - """File-like class that inserts a prefix string - at the beginning of each line it prints.""" - def __init__(self, file, prefix): - self.file = file - self.prefix = prefix - self.linestart = True - def write(self, data): - if self.linestart: - self.file.write(self.prefix) - if data.endswith('\n'): - data = data[:-1] - self.linestart = True - else: - self.linestart = False - self.file.write(data.replace('\n', '\n'+self.prefix)) - if self.linestart: - self.file.write('\n') - -### installing the excepthook for OperationErrors -##def operr_excepthook(exctype, value, traceback): -## if issubclass(exctype, OperationError): -## value.debug_excs.append((exctype, value, traceback)) -## value.print_detailed_traceback() -## else: -## old_excepthook(exctype, value, traceback) -## from pypy.tool import tb_server -## tb_server.publish_exc((exctype, value, traceback)) - -##old_excepthook = sys.excepthook -##sys.excepthook = operr_excepthook diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py deleted file mode 100644 index 578a7a95fe..0000000000 --- a/pypy/interpreter/eval.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -This module defines the abstract base classes that support execution: -Code and Frame. -""" -from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable - - -class Code(Wrappable): - """A code is a compiled version of some source code. 
- Abstract base class.""" - - def __init__(self, co_name): - self.co_name = co_name - - def create_frame(self, space, w_globals, closure=None): - "Create an empty frame object suitable for evaluation of this code." - raise TypeError, "abstract" - - def exec_code(self, space, w_globals, w_locals): - "Implements the 'exec' statement." - frame = self.create_frame(space, w_globals) - frame.setdictscope(w_locals) - return frame.run() - - def signature(self): - "([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)." - return [], None, None - - def getvarnames(self): - """List of names including the arguments, vararg and kwarg, - and possibly more locals.""" - argnames, varargname, kwargname = self.signature() - if varargname is not None: - argnames = argnames + [varargname] - if kwargname is not None: - argnames = argnames + [kwargname] - return argnames - - def getformalargcount(self): - argnames, varargname, kwargname = self.signature() - argcount = len(argnames) - if varargname is not None: - argcount += 1 - if kwargname is not None: - argcount += 1 - return argcount - - def getdocstring(self): - return None - -class UndefinedClass(object): - pass -UNDEFINED = UndefinedClass() # marker for undefined local variables - - -class Frame(Wrappable): - """A frame is an environment supporting the execution of a code object. - Abstract base class.""" - - def __init__(self, space, code, w_globals=None, numlocals=-1): - self.space = space - self.code = code # Code instance - self.w_globals = w_globals # wrapped dict of globals - self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(code.getvarnames()) - self.numlocals = numlocals - - def resume(self): - "Resume the execution of the frame from its current state." 
- executioncontext = self.space.getexecutioncontext() - previous = executioncontext.enter(self) - try: - result = self.eval(executioncontext) - finally: - executioncontext.leave(previous) - return result - - # running a frame is usually the same as resuming it from its - # initial state, but not for generator frames - run = resume - - def eval(self, executioncontext): - "Abstract method to override." - raise TypeError, "abstract" - - def getdictscope(self): - "Get the locals as a dictionary." - self.fast2locals() - return self.w_locals - - def fget_getdictscope(space, w_self): - self = space.unwrap_builtin(w_self) - return self.getdictscope() - - def setdictscope(self, w_locals): - "Initialize the locals from a dictionary." - self.w_locals = w_locals - self.locals2fast() - - def getfastscope(self): - "Abstract. Get the fast locals as a list." - raise TypeError, "abstract" - - def setfastscope(self, scope_w): - """Abstract. Initialize the fast locals from a list of values, - where the order is according to self.code.signature().""" - raise TypeError, "abstract" - - def fast2locals(self): - # Copy values from self.fastlocals_w to self.w_locals - if self.w_locals is None: - self.w_locals = self.space.newdict([]) - varnames = self.code.getvarnames() - for name, w_value in zip(varnames, self.getfastscope()): - if w_value is not UNDEFINED: - w_name = self.space.wrap(name) - self.space.setitem(self.w_locals, w_name, w_value) - - def locals2fast(self): - # Copy values from self.w_locals to self.fastlocals_w - assert self.w_locals is not None - varnames = self.code.getvarnames() - - new_fastlocals_w = [UNDEFINED]*self.numlocals - - for name, i in zip(varnames, range(self.numlocals)): - w_name = self.space.wrap(varnames[i]) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise - else: - new_fastlocals_w[i] = w_value - - self.setfastscope(new_fastlocals_w) diff --git 
a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py deleted file mode 100644 index eca7cf1cff..0000000000 --- a/pypy/interpreter/executioncontext.py +++ /dev/null @@ -1,52 +0,0 @@ -from pypy.interpreter.miscutils import getthreadlocals, Stack - -class ExecutionContext: - """An ExecutionContext holds the state of an execution thread - in the Python interpreter.""" - - def __init__(self, space): - # Note that self.framestack only contains PyFrames - self.space = space - self.framestack = Stack() - - def enter(self, frame): - locals = getthreadlocals() - previous_ec = locals.executioncontext - locals.executioncontext = self - self.framestack.push(frame) - return previous_ec - - def leave(self, previous_ec): - self.framestack.pop() - locals = getthreadlocals() - locals.executioncontext = previous_ec - - def get_w_builtins(self): - if self.framestack.empty(): - return self.space.w_builtins - else: - return self.framestack.top().w_builtins - - def make_standard_w_globals(self): - "Create a new empty 'globals' dictionary." - w_key = self.space.wrap("__builtins__") - w_value = self.get_w_builtins() - w_globals = self.space.newdict([(w_key, w_value)]) - return w_globals - - def bytecode_trace(self, frame): - "Trace function called before each bytecode." - - def exception_trace(self, operationerr): - "Trace function called upon OperationError." - operationerr.record_interpreter_traceback() - #operationerr.print_detailed_traceback(self.space) - - def sys_exc_info(self): - """Implements sys.exc_info(). - Return an OperationError instance or None.""" - for i in range(self.framestack.depth()): - frame = self.framestack.top(i) - if frame.last_exception is not None: - return frame.last_exception - return None diff --git a/pypy/interpreter/extmodule.py b/pypy/interpreter/extmodule.py deleted file mode 100644 index 2acc2e7227..0000000000 --- a/pypy/interpreter/extmodule.py +++ /dev/null @@ -1,172 +0,0 @@ -""" - -Helpers to build extension modules. 
- -""" - -from __future__ import generators # for generators.compiler_flag -import os, sys -import autopath -from pypy.interpreter import gateway -from pypy.interpreter.error import OperationError -from pypy.interpreter.pycode import PyCode -from pypy.interpreter.function import Function -from pypy.interpreter.module import Module - - -class BuiltinModule(Module): - """A Module subclass specifically for built-in modules.""" - - # '__builtins__' is stored in self.__dict__ by - # self.interplevel{exec,eval,execfile}() - NOT_RPYTHON_ATTRIBUTES = ['__builtins__'] - - def __init__(self, space, modulename, w_dict=None, sourcefile=None): - """NOT_RPYTHON - Load the named built-in module, by default from the source file - 'pypy/module/<name>module.py', which is app-level Python code - with a few special features that allow it to include interp-level - bits. (See pypy/module/test/foomodule.py) - - The module has two parts: the interp-level objects, stored as - attributes of 'self', and the app-level objects, stored in the - dictionary 'self.w_dict'. The app-level definition of the module - runs with 'self.w_dict' as globals. The interp-level bits are - executed with 'self.__dict__' as globals, i.e. they can read and - change the attributes of 'self' as global variables. 
- """ - Module.__init__(self, space, space.wrap(modulename), w_dict) - w_dict = self.w_dict - - # Compile the xxxmodule.py source file - self.__name__ = 'interpreter-level %s' % modulename - self.__file__ = sourcefile or os.path.join(autopath.pypydir, 'module', - modulename+'module.py') - space.setitem(w_dict, space.wrap('__file__'), - space.wrap(self.__file__)) - f = open(self.__file__, 'r') - modulesource = f.read() - f.close() - code = compile(modulesource, self.__file__, 'exec', - generators.compiler_flag) - pycode = PyCode()._from_code(code) - - # Set the hooks that call back from app-level to interp-level - w_builtins = space.w_builtins - self.__saved_hooks = {} - newhooks = {} - - for name, impl in [ - ('__interplevel__exec', self.interplevelexec.im_func), - ('__interplevel__eval', self.interpleveleval.im_func), - ('__interplevel__execfile', self.interplevelexecfile.im_func), - ('__import__', self.interplevelimport.im_func)]: - hook = gateway.interp2app_temp(impl).get_method(self) - w_name = space.wrap(name) - try: - self.__saved_hooks[name] = space.getitem(w_builtins, w_name) - except OperationError: - pass - w_hook = space.wrap(hook) - space.setitem(w_builtins, w_name, w_hook) - newhooks[name] = w_hook - space.setitem(self.w_dict, space.wrap('__builtins__'), - space.w_builtins) - - # Temporarily install an '__applevel__' pseudo-module - sys.modules['__applevel__'] = AppModuleHack(self) - - # Run the app-level module definition (xxxmodule.py) - pycode.exec_code(space, w_dict, w_dict) - - # Remove the pseudo-module - del sys.modules['__applevel__'] - try: - del self.__applevel__ - except AttributeError: - pass - - # Remove/restore the hooks unless they have been modified at app-level - for name, w_hook in newhooks.items(): - w_name = space.wrap(name) - try: - w_current = space.getitem(w_builtins, w_name) - except OperationError: - pass - else: - if space.is_true(space.is_(w_current, w_hook)): - if name in self.__saved_hooks: - space.setitem(w_builtins, 
w_name, - self.__saved_hooks[name]) - else: - space.delitem(w_builtins, w_name) - del self.__saved_hooks - - def interplevelexec(self, w_codestring): - "NOT_RPYTHON: 'exec' a string at interp-level." - codestring = self.space.unwrap(w_codestring) - exec codestring in self.__dict__ - return self.space.w_None - - def interpleveleval(self, w_codestring): - """NOT_RPYTHON: 'eval' a string at interp-level. The result must - be None or a wrapped object, which is returned to the caller.""" - space = self.space - codestring = space.unwrap(w_codestring) - w_result = eval(codestring, self.__dict__) - if w_result is None: - w_result = space.w_None # else assume that it is already wrapped - return w_result - - def interplevelexecfile(self, w_filename): - """NOT_RPYTON: 'exec' a file at interp-level. The file should be in - the same directory as the xxxmodule.py source file of the module.""" - filename = self.space.unwrap(w_filename) - filename = os.path.join(os.path.dirname(self.__file__), filename) - execfile(filename, self.__dict__) - return self.space.w_None - - def interplevelimport(self, w_modulename, w_globals, w_locals, w_fromlist): - """NOT_RPYTHON: Hook for 'from __interplevel__ import something'. - If there is a wrapped interp-level object 'w_something', returns it. - If there is an interp-level function 'def something(w_x, w_y...)', - build an appropriate gateway and returns it. 
- """ - space = self.space - w = space.wrap - if space.is_true(space.eq(w_modulename, w('__interplevel__'))): - if w_fromlist == space.w_None: - raise ImportError, "must use 'from __interplevel__ import xx'" - for w_name in space.unpacktuple(w_fromlist): - name = space.unwrap(w_name) - if not hasattr(self, 'w_' + name): - f = getattr(self, name) - code = gateway.BuiltinCode(f, ismethod=False, - spacearg=False) - defs_w = list(f.func_defaults or ()) - func = Function(space, code, self.w_dict, defs_w) - w_result = space.wrap(func) - else: - w_result = getattr(self, 'w_' + name) - space.setitem(self.w_dict, w_name, w_result) - return space.wrap(self) - else: - return space.call_function(self.__saved_hooks['__import__'], - w_modulename, w_globals, - w_locals, w_fromlist) - -class AppModuleHack: - """NOT_RPYTHON - For interp-level convenience: 'from __applevel__ import func' - imports the app-level function 'func' via an appropriate gateway. - """ - def __init__(self, builtinmodule): - self.space = builtinmodule.space - self.w_dict = builtinmodule.w_dict - def __getattr__(self, name): - if name.startswith('__'): - raise AttributeError, name - w_func = self.space.getitem(self.w_dict, self.space.wrap(name)) - def caller(*args): - return self.space.call_function(w_func, *args) - return caller diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py deleted file mode 100644 index 39bcd6274e..0000000000 --- a/pypy/interpreter/function.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Function objects. - -In PyPy there is no difference between built-in and user-defined function -objects; the difference lies in the code object found in their func_code -attribute. 
-""" - -from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.argument import Arguments - -class Function(Wrappable): - """A function is a code object captured with some environment: - an object space, a dictionary of globals, default arguments, - and an arbitrary 'closure' passed to the code object.""" - - def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, forcename=None): - self.space = space - self.name = forcename or code.co_name - self.w_doc = None # lazily read and wrapped from code.getdocstring() - self.code = code # Code instance - self.w_func_globals = w_globals # the globals dictionary - self.closure = closure # normally, list of Cell instances or None - self.defs_w = defs_w # list of w_default's - self.w_func_dict = space.newdict([]) - - def __repr__(self): - # return "function %s.%s" % (self.space, self.name) - # maybe we want this shorter: - return "<Function %s>" % self.name - - def call_args(self, args): - scope_w = args.parse(self.name, self.code.signature(), self.defs_w) - frame = self.code.create_frame(self.space, self.w_func_globals, - self.closure) - frame.setfastscope(scope_w) - return frame.run() - - def getdict(self): - return self.w_func_dict - - def setdict(self, w_dict): - self.w_func_dict = w_dict - - def descr_function_get(self, w_obj, w_cls=None): - space = self.space - wrap = space.wrap - asking_for_bound = (w_cls == space.w_None or - not space.is_true(space.is_(w_obj, space.w_None)) or - space.is_true(space.is_(w_cls, space.type(space.w_None)))) - if asking_for_bound: - if w_cls == space.w_None: - w_cls = space.type(w_obj) - return wrap(Method(space, wrap(self), w_obj, w_cls)) - else: - return wrap(Method(space, wrap(self), None, w_cls)) - - def descr_function_call(self, __args__): - return self.call_args(__args__) - - def fget_func_defaults(space, w_self): - self = space.unwrap(w_self) - values_w = self.defs_w - if not values_w: - return 
space.w_None - return space.newtuple(values_w) - - def fget_func_doc(space, w_self): - self = space.unwrap(w_self) - if self.w_doc is None: - self.w_doc = space.wrap(self.code.getdocstring()) - return self.w_doc - - def fset_func_doc(space, w_self, w_doc): - self = space.unwrap(w_self) - self.w_doc = w_doc - - def fdel_func_doc(space, w_self): - self = space.unwrap(w_self) - self.w_doc = space.w_None - -class Method(Wrappable): - """A method is a function bound to a specific instance or class.""" - - def __init__(self, space, w_function, w_instance, w_class): - self.space = space - self.w_function = w_function - self.w_instance = w_instance # or None - self.w_class = w_class - - def __repr__(self): - if self.w_instance: - pre = "bound" - else: - pre = "unbound" - return "%s method %s" % (pre, self.w_function.name) - - def call_args(self, args): - if self.w_instance is not None: - # bound method - args = args.prepend(self.w_instance) - else: - # unbound method - w_firstarg = args.firstarg() - if w_firstarg is not None and self.space.is_true( - self.space.isinstance(w_firstarg, self.w_class)): - pass # ok - else: - msg = ("unbound method must be called with " - "instance as first argument") # XXX fix error msg - raise OperationError(self.space.w_TypeError, - self.space.wrap(msg)) - return self.space.call_args(self.w_function, args) - - def descr_method_get(self, w_obj, w_cls=None): - space = self.space - if self.w_instance is not None: - return space.wrap(self) # already bound - else: - # only allow binding to a more specific class than before - if w_cls == space.w_None: - w_cls = space.type(w_obj) - if not space.is_true(space.issubtype(w_cls, self.w_class)): - return space.wrap(self) # subclass test failed - return space.get(self.w_function, w_obj, w_cls) - - def descr_method_call(self, __args__): - return self.call_args(__args__) - - def descr_method_getattribute(self, w_attr): - space = self.space - w_self = space.wrap(self) - w_result = space.lookup(w_self, 
space.unwrap(w_attr)) - if w_result is None: - return space.getattr(self.w_function, w_attr) - else: - return space.get(w_result, w_self) - -class StaticMethod(Wrappable): - """A static method. Note that there is one class staticmethod at - app-level too currently; this is only used for __new__ methods.""" - - def __init__(self, w_function): - self.w_function = w_function - - def descr_staticmethod_get(self, w_obj, w_cls=None): - return self.w_function diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py deleted file mode 100644 index 027239f8fd..0000000000 --- a/pypy/interpreter/gateway.py +++ /dev/null @@ -1,348 +0,0 @@ -""" - -Gateway between app-level and interpreter-level: -* BuiltinCode (call interp-level code from app-level) -* Gateway (a space-independent gateway to a Code object) -* app2interp (embed an app-level function into an interp-level callable) -* interp2app (publish an interp-level object to be visible from app-level) -* exportall (mass-call interp2app on a whole dict of objects) -* importall (mass-call app2interp on a whole dict of objects) - -""" - -import types, sys -from pypy.interpreter.error import OperationError -from pypy.interpreter import eval, pycode -from pypy.interpreter.function import Function, Method -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.argument import Arguments -from pypy.tool.cache import Cache - -class BuiltinCode(eval.Code): - "The code object implementing a built-in (interpreter-level) hook." - - # When a BuiltinCode is stored in a Function object, - # you get the functionality of CPython's built-in function type. - - def __init__(self, func, ismethod=None, spacearg=None): - "NOT_RPYTHON" - # 'implfunc' is the interpreter-level function. - # Note that this uses a lot of (construction-time) introspection. 
- eval.Code.__init__(self, func.__name__) - self.func = func - self.docstring = func.__doc__ - # extract the signature from the (CPython-level) code object - tmp = pycode.PyCode(None) - tmp._from_code(func.func_code) - # signature-based hacks: renaming arguments from w_xyz to xyz. - # Currently we enforce the following signature tricks: - # * the first arg must be either 'self' or 'space' - # * 'w_' prefixes for the rest - # * '_w' suffix for the optional '*' argument - # * alternatively a final '__args__' means an Arguments() - # Not exactly a clean approach XXX. - argnames, varargname, kwargname = tmp.signature() - argnames = list(argnames) - lookslikemethod = argnames[:1] == ['self'] - if ismethod is None: - ismethod = lookslikemethod - if spacearg is None: - spacearg = not lookslikemethod - self.ismethod = ismethod - self.spacearg = spacearg - if spacearg: - del argnames[0] - - assert kwargname is None, ( - "built-in function %r should not take a ** argument" % func) - - self.generalargs = argnames[-1:] == ['__args__'] - self.starargs = varargname is not None - assert not (self.generalargs and self.starargs), ( - "built-in function %r has both __args__ and a * argument" % func) - if self.generalargs: - del argnames[-1] - varargname = "args" - kwargname = "keywords" - elif self.starargs: - assert varargname.endswith('_w'), ( - "argument *%s of built-in function %r should end in '_w'" % - (varargname, func)) - varargname = varargname[:-2] - - for i in range(ismethod, len(argnames)): - a = argnames[i] - assert a.startswith('w_'), ( - "argument %s of built-in function %r should " - "start with 'w_'" % (a, func)) - argnames[i] = a[2:] - - self.sig = argnames, varargname, kwargname - self.minargs = len(argnames) - if self.starargs: - self.maxargs = sys.maxint - else: - self.maxargs = self.minargs - - def create_frame(self, space, w_globals, closure=None): - return BuiltinFrame(space, self, w_globals) - - def signature(self): - return self.sig - - def 
getdocstring(self): - return self.docstring - - -class BuiltinFrame(eval.Frame): - "Frame emulation for BuiltinCode." - # This is essentially just a delegation to the 'func' of the BuiltinCode. - # Initialization of locals is already done by the time run() is called, - # via the interface defined in eval.Frame. - - def setfastscope(self, scope_w): - argarray = list(scope_w) - if self.code.generalargs: - w_kwds = argarray.pop() - w_args = argarray.pop() - argarray.append(Arguments.frompacked(self.space, w_args, w_kwds)) - elif self.code.starargs: - w_args = argarray.pop() - argarray += self.space.unpacktuple(w_args) - if self.code.ismethod: - argarray[0] = self.space.unwrap(argarray[0]) - self.argarray = argarray - - def getfastscope(self): - raise OperationError(self.space.w_TypeError, - self.space.wrap("cannot get fastscope of a BuiltinFrame")) - - def run(self): - argarray = self.argarray - if self.code.spacearg: - w_result = self.code.func(self.space, *argarray) - else: - w_result = self.code.func(*argarray) - if w_result is None: - w_result = self.space.w_None - return w_result - - -class Gateway(Wrappable): - """General-purpose utility for the interpreter-level to create callables - that transparently invoke code objects (and thus possibly interpreted - app-level code).""" - - # This is similar to a Function object, but not bound to a particular - # object space. During the call, the object space is either given - # explicitly as the first argument (for plain function), or is read - # from 'self.space' for methods. - - # after initialization the following attributes should be set - # name - # code - # _staticglobals - # _staticdefs - - NOT_RPYTHON_ATTRIBUTES = ['_staticglobals', '_staticdefs'] - - def __spacebind__(self, space): - # to wrap a Gateway, we first make a real Function object out of it - # and the result is a wrapped version of this Function. 
- return self.get_function(space) - - def get_function(self, space): - return space.loadfromcache(self, - Gateway.build_all_functions, - self.getcache(space)) - - def build_all_functions(self, space): - "NOT_RPYTHON" - # the construction is supposed to be done only once in advance, - # but must be done lazily when needed only, because - # 1) it depends on the object space - # 2) the w_globals must not be built before the underlying - # _staticglobals is completely initialized, because - # w_globals must be built only once for all the Gateway - # instances of _staticglobals - if self._staticglobals is None: - w_globals = None - else: - # is there another Gateway in _staticglobals for which we - # already have a w_globals for this space ? - cache = self.getcache(space) - for value in self._staticglobals.itervalues(): - if isinstance(value, Gateway): - if value in cache.content: - # yes, we share its w_globals - fn = cache.content[value] - w_globals = fn.w_func_globals - break - else: - # no, we build all Gateways in the _staticglobals now. - w_globals = build_dict(self._staticglobals, space) - return self._build_function(space, w_globals) - - def getcache(self, space): - return space._gatewaycache - - def _build_function(self, space, w_globals): - "NOT_RPYTHON" - cache = self.getcache(space) - try: - return cache.content[self] - except KeyError: - defs = self.getdefaults(space) # needs to be implemented by subclass - fn = Function(space, self.code, w_globals, defs, forcename = self.name) - cache.content[self] = fn - return fn - - def get_method(self, obj): - # to get the Gateway as a method out of an instance, we build a - # Function and get it. 
- # the object space is implicitely fetched out of the instance - if isinstance(self.code, BuiltinCode): - assert self.code.ismethod, ( - 'global built-in function %r used as method' % - self.code.func) - space = obj.space - fn = self.get_function(space) - w_obj = space.wrap(obj) - return Method(space, space.wrap(fn), - w_obj, space.type(w_obj)) - - -class app2interp(Gateway): - """Build a Gateway that calls 'app' at app-level.""" - def __init__(self, app, app_name=None): - "NOT_RPYTHON" - Gateway.__init__(self) - # app must be a function whose name starts with 'app_'. - if not isinstance(app, types.FunctionType): - raise TypeError, "function expected, got %r instead" % app - if app_name is None: - if not app.func_name.startswith('app_'): - raise ValueError, ("function name must start with 'app_'; " - "%r does not" % app.func_name) - app_name = app.func_name[4:] - self.name = app_name - self.code = pycode.PyCode(None) - self.code._from_code(app.func_code) - self._staticglobals = app.func_globals - self._staticdefs = list(app.func_defaults or ()) - - def getdefaults(self, space): - "NOT_RPYTHON" - return [space.wrap(val) for val in self._staticdefs] - - def __call__(self, space, *args_w): - # to call the Gateway as a non-method, 'space' must be explicitly - # supplied. We build the Function object and call it. 
- fn = self.get_function(space) - return space.call_function(space.wrap(fn), *args_w) - - def __get__(self, obj, cls=None): - "NOT_RPYTHON" - if obj is None: - return self - else: - space = obj.space - w_method = space.wrap(self.get_method(obj)) - def helper_method_caller(*args_w): - return space.call_function(w_method, *args_w) - return helper_method_caller - -class interp2app(Gateway): - """Build a Gateway that calls 'f' at interp-level.""" - def __init__(self, f, app_name=None): - "NOT_RPYTHON" - Gateway.__init__(self) - # f must be a function whose name does NOT starts with 'app_' - if not isinstance(f, types.FunctionType): - raise TypeError, "function expected, got %r instead" % f - if app_name is None: - if f.func_name.startswith('app_'): - raise ValueError, ("function name %r suspiciously starts " - "with 'app_'" % f.func_name) - app_name = f.func_name - self.code = BuiltinCode(f) - self.name = app_name - self._staticdefs = list(f.func_defaults or ()) - self._staticglobals = None - - def getdefaults(self, space): - "NOT_RPYTHON" - return self._staticdefs - -def exportall(d, temporary=False): - """NOT_RPYTHON: Publish every function from a dict.""" - if temporary: - i2a = interp2app_temp - else: - i2a = interp2app - for name, obj in d.items(): - if isinstance(obj, types.FunctionType): - # names starting in 'app_' are supposedly already app-level - if name.startswith('app_'): - continue - # ignore tricky functions with another interp-level meaning - if name in ('__init__', '__new__'): - continue - # ignore names in '_xyz' - if name.startswith('_') and not name.endswith('_'): - continue - if 'app_'+name not in d: - d['app_'+name] = i2a(obj, name) - -def export_values(space, dic, w_namespace): - "NOT_RPYTHON" - for name, w_value in dic.items(): - if name.startswith('w_'): - if name == 'w_dict': - w_name = space.wrap('__dict__') - elif name == 'w_name': - w_name = space.wrap('__name__') - else: - w_name = space.wrap(name[2:]) - space.setitem(w_namespace, w_name, 
w_value) - -def importall(d, temporary=False): - """NOT_RPYTHON: Import all app_-level functions as Gateways into a dict.""" - if temporary: - a2i = app2interp_temp - else: - a2i = app2interp - for name, obj in d.items(): - if name.startswith('app_') and name[4:] not in d: - if isinstance(obj, types.FunctionType): - d[name[4:]] = a2i(obj, name[4:]) - -def build_dict(d, space): - """NOT_RPYTHON: - Search all Gateways and put them into a wrapped dictionary.""" - w_globals = space.newdict([]) - for value in d.itervalues(): - if isinstance(value, Gateway): - fn = value._build_function(space, w_globals) - w_name = space.wrap(value.name) - w_object = space.wrap(fn) - space.setitem(w_globals, w_name, w_object) - if hasattr(space, 'w_sys'): # give them 'sys' if it exists already - space.setitem(w_globals, space.wrap('sys'), space.w_sys) - return w_globals - - -# -# the next gateways are to be used only for -# temporary/initialization purposes -class app2interp_temp(app2interp): - "NOT_RPYTHON" - def getcache(self, space): - return self.__dict__.setdefault(space, Cache()) - # ^^^^^ - # armin suggested this - -class interp2app_temp(interp2app): - "NOT_RPYTHON" - def getcache(self, space): - return self.__dict__.setdefault(space, Cache()) diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py deleted file mode 100644 index 474a8869bd..0000000000 --- a/pypy/interpreter/generator.py +++ /dev/null @@ -1,83 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.eval import Frame -from pypy.interpreter.pyframe import ControlFlowException, ExitFrame - -# -# Generator support. Note that GeneratorFrame is not a subclass of PyFrame. -# PyCode objects use a custom subclass of both PyFrame and GeneratorFrame -# when they need to interpret Python bytecode that is a generator. 
-# Otherwise, GeneratorFrame could also be used to define, say, -# built-in generators (which are usually done in CPython as functions -# that return iterators). -# - -class GeneratorFrame(Frame): - "A frame attached to a generator." - - def run(self): - "Build a generator-iterator." - self.exhausted = False - return self.space.wrap(GeneratorIterator(self)) - - ### extra opcodes ### - - # XXX mmmh, GeneratorFrame is supposed to be independent from - # Python bytecode... Well, it is. These are not used when - # GeneratorFrame is used with other kinds of Code subclasses. - - def RETURN_VALUE(f): # overridden - raise SGeneratorReturn() - - def YIELD_VALUE(f): - w_yieldedvalue = f.valuestack.pop() - raise SYieldValue(w_yieldedvalue) - YIELD_STMT = YIELD_VALUE # misnamed in old versions of dis.opname - - -class GeneratorIterator(Wrappable): - "An iterator created by a generator." - - def __init__(self, frame): - self.space = frame.space - self.frame = frame - self.running = False - - def descr__iter__(self): - return self.space.wrap(self) - - def descr_next(self): - space = self.space - if self.running: - raise OperationError(space.w_ValueError, - space.wrap('generator already executing')) - if self.frame.exhausted: - raise OperationError(space.w_StopIteration, space.w_None) - self.running = True - try: - try: - return self.frame.resume() - except OperationError, e: - if e.match(self.space, self.space.w_StopIteration): - raise OperationError(space.w_StopIteration, space.w_None) - else: - raise - finally: - self.running = False - -# -# the specific ControlFlowExceptions used by generators -# - -class SYieldValue(ControlFlowException): - """Signals a 'yield' statement. 
- Argument is the wrapped object to return.""" - def action(self, frame, last_instr, executioncontext): - w_yieldvalue = self.args[0] - raise ExitFrame(w_yieldvalue) - -class SGeneratorReturn(ControlFlowException): - """Signals a 'return' statement inside a generator.""" - def emptystack(self, frame): - frame.exhausted = True - raise OperationError(frame.space.w_StopIteration, frame.space.w_None) diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py deleted file mode 100644 index 474b0cd71d..0000000000 --- a/pypy/interpreter/interactive.py +++ /dev/null @@ -1,99 +0,0 @@ -import autopath - -from pypy.interpreter import executioncontext, baseobjspace -import sys -import code - - -class PyPyConsole(code.InteractiveConsole): - def __init__(self, objspace): - code.InteractiveConsole.__init__(self) - self.space = objspace - self.ec = executioncontext.ExecutionContext(self.space) - self.w_globals = self.ec.make_standard_w_globals() - self.space.setitem(self.w_globals, - self.space.wrap("__name__"), - self.space.wrap("__main__")) - - def interact(self, banner=None): - if banner is None: - #banner = "Python %s in pypy\n%s / %s" % ( - # sys.version, self.__class__.__name__, - # self.space.__class__.__name__) - banner = "PyPy in %s on top of Python %s" % ( - self.space.__class__.__name__, sys.version.split()[0]) - code.InteractiveConsole.interact(self, banner) - - def raw_input(self, prompt=""): - # add a character to the PyPy prompt so that you know where you - # are when you debug it with "python -i py.py" - try: - return code.InteractiveConsole.raw_input(self, prompt[0] + prompt) - except KeyboardInterrupt: - # fires into an interpreter-level console - print - banner = ("Python %s on %s\n" % (sys.version, sys.platform) + - "*** Entering interpreter-level console ***") - local = self.__dict__.copy() - for w_name in self.space.unpackiterable(self.w_globals): - local['w_' + self.space.unwrap(w_name)] = ( - self.space.getitem(self.w_globals, w_name)) - 
code.interact(banner=banner, local=local) - # copy back 'w_' names - for name in local: - if name.startswith('w_'): - self.space.setitem(self.w_globals, - self.space.wrap(name[2:]), - local[name]) - print '*** Leaving interpreter-level console ***' - raise - - def runcode(self, code): - # 'code' is a CPython code object - from pypy.interpreter.pycode import PyCode - pycode = PyCode()._from_code(code) - try: - pycode.exec_code(self.space, self.w_globals, self.w_globals) - except baseobjspace.OperationError, operationerr: - space = self.space - if operationerr.match(space, space.w_SystemExit): - # XXX fetch the exit code from somewhere inside the w_SystemExit - raise SystemExit - # XXX insert exception info into the application-level sys.last_xxx - operationerr.print_detailed_traceback(space) - # for debugging convenience we also insert the exception into - # the interpreter-level sys.last_xxx - sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info() - else: - try: - if sys.stdout.softspace: - print - except AttributeError: - # Don't crash if user defined stdout doesn't have softspace - pass - - def runsource(self, source, ignored_filename="<input>", symbol="single"): - hacked_filename = '<inline>\n'+source - try: - code = self.compile(source, hacked_filename, symbol) - except (OverflowError, SyntaxError, ValueError): - self.showsyntaxerror(self.filename) - return 0 - if code is None: - return 1 - self.runcode(code) - return 0 - -if __name__ == '__main__': - try: - import readline - except ImportError: - pass - - from pypy.tool import option - from pypy.tool import testit - args = option.process_options(option.get_standard_options(), - option.Options) - objspace = option.objspace() - con = PyPyConsole(objspace) - con.interact() diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py deleted file mode 100644 index d4f1a776fa..0000000000 --- a/pypy/interpreter/main.py +++ /dev/null @@ -1,65 +0,0 @@ -import autopath -from pypy.tool import option 
-from pypy.interpreter import executioncontext, module -from pypy.interpreter.error import OperationError, PyPyError -import sys - -def _run_eval_string(source, filename, space, eval): - if eval: - cmd = 'eval' - else: - cmd = 'exec' - - try: - if space is None: - from pypy.objspace.std import StdObjSpace - space = StdObjSpace() - - compile = space.builtin.compile - w = space.wrap - w_code = compile(w(source), w(filename), w(cmd), - w(0), w(0)) - - ec = executioncontext.ExecutionContext(space) - - mainmodule = module.Module(space, space.wrap("__main__")) - w_globals = mainmodule.w_dict - - pycode = space.unwrap(w_code) - retval = pycode.exec_code(space, w_globals, w_globals) - if eval: - return retval - else: - return - - except OperationError, operationerr: - operationerr.record_interpreter_traceback() - raise PyPyError(space, operationerr) - -def run_string(source, filename='<string>', space=None): - _run_eval_string(source, filename, space, False) - -def eval_string(source, filename='<string>', space=None): - return _run_eval_string(source, filename, space, True) - -def run_file(filename, space=None): - if __name__=='__main__': - print "Running %r with %r" % (filename, space) - istring = open(filename).read() - run_string(istring, filename, space) - -def main(argv=None): - if argv is None: - argv = sys.argv - - argv = option.process_options(option.get_standard_options(), - option.Options) - space = option.objspace() - try: - run_file(argv[0], space) - except PyPyError, pypyerr: - pypyerr.operationerr.print_detailed_traceback(pypyerr.space) - -if __name__ == '__main__': - main(sys.argv) - diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py deleted file mode 100644 index 8045f901fe..0000000000 --- a/pypy/interpreter/miscutils.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Miscellaneous utilities. 
-""" - -import types - - -class Stack: - """Utility class implementing a stack.""" - - def __init__(self): - self.items = [] - - def clone(self): - s = self.__class__() - for item in self.items: - try: - item = item.clone() - except AttributeError: - pass - s.push(item) - return s - - def push(self, item): - self.items.append(item) - - def pop(self): - return self.items.pop() - - def top(self, position=0): - """'position' is 0 for the top of the stack, 1 for the item below, - and so on. It must not be negative.""" - return self.items[~position] - - def depth(self): - return len(self.items) - - def empty(self): - return not self.items - - -class InitializedClass(type): - """NOT_RPYTHON. A meta-class that allows a class to initialize itself (or - its subclasses) by calling __initclass__() as a class method.""" - def __init__(self, name, bases, dict): - super(InitializedClass, self).__init__(name, bases, dict) - for basecls in self.__mro__: - raw = basecls.__dict__.get('__initclass__') - if isinstance(raw, types.FunctionType): - raw(self) # call it as a class method - - -class RwDictProxy(object): - """NOT_RPYTHON. 
A dict-like class standing for 'cls.__dict__', to work - around the fact that the latter is a read-only proxy for new-style - classes.""" - - def __init__(self, cls): - self.cls = cls - - def __getitem__(self, attr): - return self.cls.__dict__[attr] - - def __setitem__(self, attr, value): - setattr(self.cls, attr, value) - - def __contains__(self, value): - return value in self.cls.__dict__ - - def items(self): - return self.cls.__dict__.items() - - -class ThreadLocals: - """Thread-local storage.""" - - def __init__(self): - self.executioncontext = None - -# XXX no thread support yet, so this is easy :-) -_locals = ThreadLocals() -def getthreadlocals(): - return _locals diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py deleted file mode 100644 index 2819347eb5..0000000000 --- a/pypy/interpreter/module.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Module objects. -""" - -from pypy.interpreter.baseobjspace import Wrappable - -class Module(Wrappable): - """A module.""" - - def __init__(self, space, w_name, w_dict=None): - self.space = space - if w_dict is None: - w_dict = space.newdict([]) - self.w_dict = w_dict - self.w_name = w_name - space.setitem(w_dict, space.wrap('__name__'), w_name) - - def getdict(self): - return self.w_dict - - def setdict(self, w_dict): - self.w_dict = w_dict - - def descr_module__new__(space, w_subtype, __args__): - module = space.allocate_instance(Module, w_subtype) - module.__init__(space, space.wrap('?')) - return space.wrap(module) - - def descr_module__init__(self, w_name): - space = self.space - self.w_name = w_name - space.setitem(self.w_dict, space.wrap('__name__'), w_name) diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py deleted file mode 100644 index 713b57ea04..0000000000 --- a/pypy/interpreter/nestedscope.py +++ /dev/null @@ -1,177 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.interpreter.eval import UNDEFINED -from pypy.interpreter.pyopcode import PyInterpFrame 
-from pypy.interpreter import function, pycode -from pypy.interpreter.baseobjspace import Wrappable - -class Cell(Wrappable): - "A simple container for a wrapped value." - - def __init__(self, w_value=UNDEFINED): - self.w_value = w_value - - def clone(self): - return self.__class__(self.w_value) - - def empty(self): - return self.w_value is UNDEFINED - - def get(self): - if self.w_value is UNDEFINED: - raise ValueError, "get() from an empty cell" - return self.w_value - - def set(self, w_value): - self.w_value = w_value - - def delete(self): - if self.w_value is UNDEFINED: - raise ValueError, "delete() on an empty cell" - self.w_value = UNDEFINED - - def __repr__(self): - """ representation for debugging purposes """ - if self.w_value is UNDEFINED: - content = "" - else: - content = repr(self.w_value) - return "<%s(%s) at 0x%x>" % (self.__class__.__name__, - content, id(self)) - - -class PyNestedScopeFrame(PyInterpFrame): - """This class enhances a standard frame with nested scope abilities, - i.e. handling of cell/free variables.""" - - # Cell Vars: - # my local variables that are exposed to my inner functions - # Free Vars: - # variables coming from a parent function in which i'm nested - # 'closure' is a list of Cell instances: the received free vars. 
- - def __init__(self, space, code, w_globals, closure): - PyInterpFrame.__init__(self, space, code, w_globals, closure) - ncellvars = len(code.co_cellvars) - nfreevars = len(code.co_freevars) - if closure is None: - if nfreevars: - raise OperationError(space.w_TypeError, - "directly executed code object " - "may not contain free variables") - closure = [] - else: - if len(closure) != nfreevars: - raise ValueError("code object received a closure with " - "an unexpected number of free variables") - self.cells = [Cell() for i in range(ncellvars)] + closure - - def getclosure(self): - ncellvars = len(self.code.co_cellvars) # not part of the closure - return self.cells[ncellvars:] - - def fast2locals(self): - PyInterpFrame.fast2locals(self) - # cellvars are values exported to inner scopes - # freevars are values coming from outer scopes - # for locals we only want the ones exported to inner-scopes - # XXX check more exactly how CPython does it - freevarnames = self.code.co_cellvars # + self.code.co_freevars - for name, cell in zip(freevarnames, self.cells): - try: - w_value = cell.get() - except ValueError: - pass - else: - w_name = self.space.wrap(name) - self.space.setitem(self.w_locals, w_name, w_value) - - def locals2fast(self): - PyInterpFrame.locals2fast(self) - freevarnames = self.code.co_cellvars + self.code.co_freevars - for name, cell in zip(freevarnames, self.cells): - w_name = self.space.wrap(name) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise - else: - cell.set(w_value) - - def setfastscope(self, scope_w): - PyInterpFrame.setfastscope(self, scope_w) - if self.code.co_cellvars: - # the first few cell vars could shadow already-set arguments, - # in the same order as they appear in co_varnames - code = self.code - argvars = code.co_varnames - cellvars = code.co_cellvars - next = 0 - nextname = cellvars[0] - for i in range(len(scope_w)): - if argvars[i] == 
nextname: - # argument i has the same name as the next cell var - w_value = scope_w[i] - self.cells[next] = Cell(w_value) - next += 1 - try: - nextname = cellvars[next] - except IndexError: - break # all cell vars initialized this way - - def getfreevarname(self, index): - freevarnames = self.code.co_cellvars + self.code.co_freevars - return freevarnames[index] - - def iscellvar(self, index): - # is the variable given by index a cell or a free var? - return index < len(self.code.co_cellvars) - - ### extra opcodes ### - - def LOAD_CLOSURE(f, varindex): - # nested scopes: access the cell object - cell = f.cells[varindex] - w_value = f.space.wrap(cell) - f.valuestack.push(w_value) - - def LOAD_DEREF(f, varindex): - # nested scopes: access a variable through its cell object - cell = f.cells[varindex] - try: - w_value = cell.get() - except ValueError: - varname = f.getfreevarname(varindex) - if f.iscellvar(varindex): - message = "local variable '%s' referenced before assignment" - w_exc_type = f.space.w_UnboundLocalError - else: - message = ("free variable '%s' referenced before assignment" - " in enclosing scope") - w_exc_type = f.space.w_NameError - raise OperationError(w_exc_type, f.space.wrap(message % varname)) - else: - f.valuestack.push(w_value) - - def STORE_DEREF(f, varindex): - # nested scopes: access a variable through its cell object - w_newvalue = f.valuestack.pop() - #try: - cell = f.cells[varindex] - #except IndexError: - # import pdb; pdb.set_trace() - # raise - cell.set(w_newvalue) - - def MAKE_CLOSURE(f, numdefaults): - w_codeobj = f.valuestack.pop() - codeobj = f.space.unwrap(w_codeobj) - assert isinstance(codeobj, pycode.PyCode) - nfreevars = len(codeobj.co_freevars) - freevars = [f.space.unwrap(f.valuestack.pop()) for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [f.valuestack.pop() for i in range(numdefaults)] - defaultarguments.reverse() - fn = function.Function(f.space, codeobj, f.w_globals, - defaultarguments, freevars) - 
f.valuestack.push(f.space.wrap(fn)) diff --git a/pypy/interpreter/py.py b/pypy/interpreter/py.py deleted file mode 100755 index 9582c0c3f5..0000000000 --- a/pypy/interpreter/py.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python - -try: - import autopath -except ImportError: - pass - -from pypy.tool import option -from pypy.tool.optik import make_option -from pypy.interpreter import main, interactive, error -import sys - -class Options(option.Options): - interactive = 0 - command = [] - -def get_main_options(): - options = option.get_standard_options() - options.append(make_option( - '-i', action="store_true", dest="interactive", - help="inspect interactively after running script")) - - def command_callback(option, opt, value, parser): - parser.values.command = parser.rargs[:] - parser.rargs[:] = [] - - options.append(make_option( - '-c', action="callback", - callback=command_callback, - help="program passed in as CMD (terminates option list)")) - - return options - -def main_(argv=None): - from pypy.tool import tb_server - args = option.process_options(get_main_options(), Options, argv[1:]) - space = None - try: - space = option.objspace() - go_interactive = Options.interactive - banner = '' - if Options.command: - args = ['-c'] - for arg in args: - space.call_method(space.sys.w_argv, 'append', space.wrap(arg)) - if Options.command: - try: - main.run_string(Options.command[0], '<string>', space) - except error.PyPyError, pypyerr: - pypyerr.operationerr.print_detailed_traceback(pypyerr.space) - elif args: - try: - main.run_file(args[0], space) - except error.PyPyError, pypyerr: - pypyerr.operationerr.print_detailed_traceback(pypyerr.space) - else: - go_interactive = 1 - banner = None - if go_interactive: - con = interactive.PyPyConsole(space) - if banner == '': - banner = '%s / %s'%(con.__class__.__name__, - space.__class__.__name__) - con.interact(banner) - except: - exc_type, value, tb = sys.exc_info() - sys.last_type = exc_type - sys.last_value = value - 
sys.last_traceback = tb - if issubclass(exc_type, SystemExit): - pass # don't print tracebacks for SystemExit - elif isinstance(value, error.OperationError): - value.print_detailed_traceback(space=space) - else: - sys.excepthook(exc_type, value, tb) - tb_server.wait_until_interrupt() - - tb_server.stop() - -if __name__ == '__main__': - try: - import readline - except: - pass - if hasattr(sys, 'setrecursionlimit'): - # for running "python -i py.py -Si -- py.py -Si" - sys.setrecursionlimit(3000) - main_(sys.argv) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py deleted file mode 100644 index 97b5b8c467..0000000000 --- a/pypy/interpreter/pycode.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Python-style code objects. -PyCode instances have the same co_xxx arguments as CPython code objects. -The bytecode interpreter itself is implemented by the PyFrame class. -""" - -import dis -from pypy.interpreter import eval -from pypy.tool.cache import Cache - -# code object contants, for co_flags below -CO_OPTIMIZED = 0x0001 -CO_NEWLOCALS = 0x0002 -CO_VARARGS = 0x0004 -CO_VARKEYWORDS = 0x0008 -CO_NESTED = 0x0010 -CO_GENERATOR = 0x0020 - -class PyCode(eval.Code): - "CPython-style code objects." - - def __init__(self, co_name=''): - eval.Code.__init__(self, co_name) - self.co_argcount = 0 # #arguments, except *vararg and **kwarg - self.co_nlocals = 0 # #local variables - self.co_stacksize = 0 # #entries needed for evaluation stack - self.co_flags = 0 # CO_..., see above - self.co_code = None # string: instruction opcodes - self.co_consts = () # tuple: constants used - self.co_names = () # tuple of strings: names (for attrs,...) 
- self.co_varnames = () # tuple of strings: local variable names - self.co_freevars = () # tuple of strings: free variable names - self.co_cellvars = () # tuple of strings: cell variable names - # The rest doesn't count for hash/cmp - self.co_filename = "" # string: where it was loaded from - #self.co_name (in base class)# string: name, for reference - self.co_firstlineno = 0 # first source line number - self.co_lnotab = "" # string: encoding addr<->lineno mapping - - def _from_code(self, code): - """ Initialize the code object from a real (CPython) one. - This is just a hack, until we have our own compile. - At the moment, we just fake this. - This method is called by our compile builtin function. - """ - import types - assert isinstance(code, types.CodeType) - # simply try to suck in all attributes we know of - # with a lot of boring asserts to enforce type knowledge - # XXX get rid of that ASAP with a real compiler! - x = code.co_argcount; assert isinstance(x, int) - self.co_argcount = x - x = code.co_nlocals; assert isinstance(x, int) - self.co_nlocals = x - x = code.co_stacksize; assert isinstance(x, int) - self.co_stacksize = x - x = code.co_flags; assert isinstance(x, int) - self.co_flags = x - x = code.co_code; assert isinstance(x, str) - self.co_code = x - #self.co_consts = <see below> - x = code.co_names; assert isinstance(x, tuple) - self.co_names = x - x = code.co_varnames; assert isinstance(x, tuple) - self.co_varnames = x - x = code.co_freevars; assert isinstance(x, tuple) - self.co_freevars = x - x = code.co_cellvars; assert isinstance(x, tuple) - self.co_cellvars = x - x = code.co_filename; assert isinstance(x, str) - self.co_filename = x - x = code.co_name; assert isinstance(x, str) - self.co_name = x - x = code.co_firstlineno; assert isinstance(x, int) - self.co_firstlineno = x - x = code.co_lnotab; assert isinstance(x, str) - self.co_lnotab = x - # recursively _from_code()-ify the code objects in code.co_consts - newconsts = [] - for const in 
code.co_consts: - if isinstance(const, types.CodeType): - const = PyCode()._from_code(const) - newconsts.append(const) - self.co_consts = tuple(newconsts) - return self - - def create_frame(self, space, w_globals, closure=None): - "Create an empty PyFrame suitable for this code object." - # select the appropriate kind of frame - from pypy.interpreter.pyopcode import PyInterpFrame as Frame - if self.co_cellvars or self.co_freevars: - from pypy.interpreter.nestedscope import PyNestedScopeFrame as F - Frame = enhanceclass(Frame, F) - if self.co_flags & CO_GENERATOR: - from pypy.interpreter.generator import GeneratorFrame as F - Frame = enhanceclass(Frame, F) - return Frame(space, self, w_globals, closure) - - def signature(self): - "([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)." - argcount = self.co_argcount - argnames = list(self.co_varnames[:argcount]) - if self.co_flags & CO_VARARGS: - varargname = self.co_varnames[argcount] - argcount += 1 - else: - varargname = None - if self.co_flags & CO_VARKEYWORDS: - kwargname = self.co_varnames[argcount] - argcount += 1 - else: - kwargname = None - return argnames, varargname, kwargname - - def getvarnames(self): - return self.co_varnames - - def getdocstring(self): - if self.co_consts: # it is probably never empty - return self.co_consts[0] - else: - return None - - def dictscope_needed(self): - # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. - # class bodies only have CO_NEWLOCALS. 
- return not (self.co_flags & CO_OPTIMIZED) - - def getjoinpoints(self): - """Compute the bytecode positions that are potential join points - (for FlowObjSpace)""" - # first approximation - return dis.findlabels(self.co_code) - - def descr_code__new__(space, w_subtype, - w_argcount, w_nlocals, w_stacksize, w_flags, - w_codestring, w_constants, w_names, - w_varnames, w_filename, w_name, w_firstlineno, - w_lnotab, w_freevars=None, w_cellvars=None): - code = space.allocate_instance(PyCode, w_subtype) - code.__init__() - # XXX typechecking everywhere! - code.co_argcount = space.unwrap(w_argcount) - code.co_nlocals = space.unwrap(w_nlocals) - code.co_stacksize = space.unwrap(w_stacksize) - code.co_flags = space.unwrap(w_flags) - code.co_code = space.unwrap(w_codestring) - code.co_consts = space.unwrap(w_constants) - code.co_names = space.unwrap(w_names) - code.co_varnames = space.unwrap(w_varnames) - code.co_filename = space.unwrap(w_filename) - code.co_name = space.unwrap(w_name) - code.co_firstlineno= space.unwrap(w_firstlineno) - code.co_lnotab = space.unwrap(w_lnotab) - if w_freevars is not None: - code.co_freevars = space.unwrap(w_freevars) - if w_cellvars is not None: - code.co_cellvars = space.unwrap(w_cellvars) - return space.wrap(code) - -def _really_enhanceclass(key, stuff): - return type("Mixed", key, {}) - -def enhanceclass(baseclass, newclass, cache=Cache()): - # this is a bit too dynamic for RPython, but it looks nice - # and I assume that we can easily change it into a static - # pre-computed table - if issubclass(newclass, baseclass): - return newclass - else: - return cache.getorbuild((newclass, baseclass), - _really_enhanceclass, None) - - -def cpython_code_signature(co): - return PyCode()._from_code(co).signature() diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py deleted file mode 100644 index b20c436260..0000000000 --- a/pypy/interpreter/pyframe.py +++ /dev/null @@ -1,363 +0,0 @@ -""" PyFrame class implementation with the 
interpreter main loop. -""" - -from pypy.interpreter import eval, baseobjspace, gateway -from pypy.interpreter.miscutils import Stack -from pypy.interpreter.error import OperationError -from pypy.interpreter import pytraceback - - -class PyFrame(eval.Frame): - """Represents a frame for a regular Python function - that needs to be interpreted. - - See also pyopcode.PyStandardFrame and pynestedscope.PyNestedScopeFrame. - - Public fields: - * 'space' is the object space this frame is running in - * 'code' is the PyCode object this frame runs - * 'w_locals' is the locals dictionary to use - * 'w_globals' is the attached globals dictionary - * 'w_builtins' is the attached built-ins dictionary - * 'valuestack', 'blockstack', 'next_instr' control the interpretation - """ - - def __init__(self, space, code, w_globals, closure): - eval.Frame.__init__(self, space, code, w_globals, code.co_nlocals) - self.valuestack = Stack() - self.blockstack = Stack() - self.last_exception = None - self.next_instr = 0 - self.w_builtins = self.space.w_builtins - # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. - # class bodies only have CO_NEWLOCALS. - if code.dictscope_needed(): - self.w_locals = space.newdict([]) # set to None by Frame.__init__ - - self.fastlocals_w = [eval.UNDEFINED]*self.numlocals - - def getfastscope(self): - "Get the fast locals as a list." - return self.fastlocals_w - - def setfastscope(self, scope_w): - """Initialize the fast locals from a list of values, - where the order is according to self.code.signature().""" - if len(scope_w) > len(self.fastlocals_w): - raise ValueError, "new fastscope is longer than the allocated area" - self.fastlocals_w[:len(scope_w)] = scope_w - - def getclosure(self): - return None - - def eval(self, executioncontext): - "Interpreter main loop!" 
- try: - while True: - executioncontext.bytecode_trace(self) - last_instr = self.next_instr - try: - try: - # fetch and dispatch the next opcode - # dispatch() is abstract, see pyopcode. - self.dispatch() - except OperationError, e: - pytraceback.record_application_traceback( - self.space, e, self, last_instr) - executioncontext.exception_trace(e) - # convert an OperationError into a control flow - # exception - import sys - tb = sys.exc_info()[2] - raise SApplicationException(e, tb) - # XXX some other exceptions could be caught here too, - # like KeyboardInterrupt - - except ControlFlowException, ctlflowexc: - # we have a reason to change the control flow - # (typically unroll the stack) - ctlflowexc.action(self, last_instr, executioncontext) - - except ExitFrame, e: - # leave that frame - w_exitvalue = e.args[0] - return w_exitvalue - - ### exception stack ### - - def clean_exceptionstack(self): - # remove all exceptions that can no longer be re-raised - # because the current valuestack is no longer deep enough - # to hold the corresponding information - while self.exceptionstack: - ctlflowexc, valuestackdepth = self.exceptionstack.top() - if valuestackdepth <= self.valuestack.depth(): - break - self.exceptionstack.pop() - - ### line numbers ### - - def fget_f_lineno(space, w_self): - "Returns the line number of the instruction currently being executed." - self = space.unwrap_builtin(w_self) - return space.wrap(self.get_last_lineno()) - - def get_last_lineno(self): - "Returns the line number of the instruction currently being executed." - return pytraceback.offset2lineno(self.code, self.next_instr-1) - - def get_next_lineno(self): - "Returns the line number of the next instruction to execute." 
- return pytraceback.offset2lineno(self.code, self.next_instr) - - -### Frame Blocks ### - -class FrameBlock: - - """Abstract base class for frame blocks from the blockstack, - used by the SETUP_XXX and POP_BLOCK opcodes.""" - - def __init__(self, frame, handlerposition): - self.handlerposition = handlerposition - self.valuestackdepth = frame.valuestack.depth() - - def __eq__(self, other): - return (self.__class__ is other.__class__ and - self.handlerposition == other.handlerposition and - self.valuestackdepth == other.valuestackdepth) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash((self.handlerposition, self.valuestackdepth)) - - def cleanupstack(self, frame): - for i in range(self.valuestackdepth, frame.valuestack.depth()): - frame.valuestack.pop() - - def cleanup(self, frame): - "Clean up a frame when we normally exit the block." - self.cleanupstack(frame) - - def unroll(self, frame, unroller): - "Clean up a frame when we abnormally exit the block." - self.cleanupstack(frame) - return False # continue to unroll - - -class LoopBlock(FrameBlock): - """A loop block. Stores the end-of-loop pointer in case of 'break'.""" - - def unroll(self, frame, unroller): - if isinstance(unroller, SContinueLoop): - # re-push the loop block without cleaning up the value stack, - # and jump to the beginning of the loop, stored in the - # exception's argument - frame.blockstack.push(self) - jump_to = unroller.args[0] - frame.next_instr = jump_to - return True # stop unrolling - self.cleanupstack(frame) - if isinstance(unroller, SBreakLoop): - # jump to the end of the loop - frame.next_instr = self.handlerposition - return True # stop unrolling - return False - - -class ExceptBlock(FrameBlock): - """An try:except: block. 
Stores the position of the exception handler.""" - - def unroll(self, frame, unroller): - self.cleanupstack(frame) - if isinstance(unroller, SApplicationException): - # push the exception to the value stack for inspection by the - # exception handler (the code after the except:) - operationerr = unroller.args[0] - w_type = operationerr.w_type - w_value = operationerr.w_value - w_normalized = normalize_exception(frame.space, w_type, w_value, - frame.space.w_None) - w_type, w_value, w_tb = frame.space.unpacktuple(w_normalized, 3) - # save the normalized exception back into the OperationError - # -- in particular it makes sure that sys.exc_info() etc see - # normalized exception. - operationerr.w_type = w_type - operationerr.w_value = w_value - # the stack setup is slightly different than in CPython: - # instead of the traceback, we store the unroller object, - # wrapped. - frame.valuestack.push(frame.space.wrap(unroller)) - frame.valuestack.push(w_value) - frame.valuestack.push(w_type) - frame.next_instr = self.handlerposition # jump to the handler - return True # stop unrolling - return False - -def app_normalize_exception(etype, value, tb): - """Normalize an (exc_type, exc_value) pair: - exc_value will be an exception instance and exc_type its class. - """ - # mistakes here usually show up as infinite recursion, which is fun. 
- while isinstance(etype, tuple): - etype = etype[0] - if isinstance(etype, type): - if not isinstance(value, etype): - if value is None: - # raise Type: we assume we have to instantiate Type - value = etype() - elif isinstance(value, tuple): - # raise Type, Tuple: assume Tuple contains the constructor args - value = etype(*value) - else: - # raise Type, X: assume X is the constructor argument - value = etype(value) - # raise Type, Instance: let etype be the exact type of value - etype = value.__class__ - elif type(etype) is str: - # XXX warn -- deprecated - pass - else: - # raise X: we assume that X is an already-built instance - if value is not None: - raise TypeError("instance exception may not have a separate value") - value = etype - etype = value.__class__ - # for the sake of language consistency we should not allow - # things like 'raise 1', but it's probably fine (i.e. - # not ambiguous) to allow them in the explicit form 'raise int, 1' - if not hasattr(value, '__dict__') and not hasattr(value, '__slots__'): - raise TypeError("raising built-in objects can be ambiguous, " - "use 'raise type, value' instead") - return etype, value, tb -normalize_exception = gateway.app2interp(app_normalize_exception) - - -class FinallyBlock(FrameBlock): - """A try:finally: block. Stores the position of the exception handler.""" - - def cleanup(self, frame): - # upon normal entry into the finally: part, the standard Python - # bytecode pushes a single None for END_FINALLY. In our case we - # always push three values into the stack: the wrapped ctlflowexc, - # the exception value and the exception type (which are all None - # here). - self.cleanupstack(frame) - # one None already pushed by the bytecode - frame.valuestack.push(frame.space.w_None) - frame.valuestack.push(frame.space.w_None) - - def unroll(self, frame, unroller): - # any abnormal reason for unrolling a finally: triggers the end of - # the block unrolling and the entering the finally: handler. 
- # see comments in cleanup(). - self.cleanupstack(frame) - frame.valuestack.push(frame.space.wrap(unroller)) - frame.valuestack.push(frame.space.w_None) - frame.valuestack.push(frame.space.w_None) - frame.next_instr = self.handlerposition # jump to the handler - return True # stop unrolling - - -### Internal exceptions that change the control flow ### -### and (typically) unroll the block stack ### - -class ControlFlowException(Exception, baseobjspace.BaseWrappable): - """Abstract base class for interpreter-level exceptions that - instruct the interpreter to change the control flow and the - block stack. - - The concrete subclasses correspond to the various values WHY_XXX - values of the why_code enumeration in ceval.c: - - WHY_NOT, OK, not this one :-) - WHY_EXCEPTION, SApplicationException - WHY_RERAISE, we don't think this is needed - WHY_RETURN, SReturnValue - WHY_BREAK, SBreakLoop - WHY_CONTINUE, SContinueLoop - WHY_YIELD SYieldValue - - """ - def action(self, frame, last_instr, executioncontext): - "Default unroller implementation." - while not frame.blockstack.empty(): - block = frame.blockstack.pop() - if block.unroll(frame, self): - break - else: - self.emptystack(frame) - - def emptystack(self, frame): - "Default behavior when the block stack is exhausted." - # could occur e.g. when a BREAK_LOOP is not actually within a loop - raise BytecodeCorruption, "block stack exhausted" - - # for the flow object space, a way to "pickle" and "unpickle" the - # ControlFlowException by enumerating the Variables it contains. - def state_unpack_variables(self, space): - return [] # by default, overridden below - def state_pack_variables(self, space, *values_w): - assert len(values_w) == 0 - -class SApplicationException(ControlFlowException): - """Unroll the stack because of an application-level exception - (i.e. 
an OperationException).""" - - def action(self, frame, last_instr, executioncontext): - e = self.args[0] - frame.last_exception = e - - ControlFlowException.action(self, frame, - last_instr, executioncontext) - - def emptystack(self, frame): - # propagate the exception to the caller - if len(self.args) == 2: - operationerr, tb = self.args - raise operationerr.__class__, operationerr, tb - else: - operationerr = self.args[0] - raise operationerr - - def state_unpack_variables(self, space): - e = self.args[0] - assert isinstance(e, OperationError) - return [e.w_type, e.w_value] - def state_pack_variables(self, space, w_type, w_value): - self.args = (OperationError(w_type, w_value),) - -class SBreakLoop(ControlFlowException): - """Signals a 'break' statement.""" - -class SContinueLoop(ControlFlowException): - """Signals a 'continue' statement. - Argument is the bytecode position of the beginning of the loop.""" - - def state_unpack_variables(self, space): - jump_to = self.args[0] - return [space.wrap(jump_to)] - def state_pack_variables(self, space, w_jump_to): - self.args = (space.unwrap(w_jump_to),) - -class SReturnValue(ControlFlowException): - """Signals a 'return' statement. - Argument is the wrapped object to return.""" - def emptystack(self, frame): - w_returnvalue = self.args[0] - raise ExitFrame(w_returnvalue) - - def state_unpack_variables(self, space): - w_returnvalue = self.args[0] - return [w_returnvalue] - def state_pack_variables(self, space, w_returnvalue): - self.args = (w_returnvalue,) - -class ExitFrame(Exception): - """Signals the end of the frame execution. - The argument is the returned or yielded value, already wrapped.""" - -class BytecodeCorruption(ValueError): - """Detected bytecode corruption. 
Never caught; it's an error.""" diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py deleted file mode 100644 index bf17ac744b..0000000000 --- a/pypy/interpreter/pyopcode.py +++ /dev/null @@ -1,868 +0,0 @@ -""" -Implementation of a part of the standard Python opcodes. -The rest, dealing with variables in optimized ways, is in -pyfastscope.py and pynestedscope.py. -""" - -from pypy.interpreter.baseobjspace import OperationError -from pypy.interpreter.eval import UNDEFINED -from pypy.interpreter import gateway, function -from pypy.interpreter import pyframe, pytraceback -from pypy.interpreter.miscutils import InitializedClass -from pypy.interpreter.argument import Arguments - - -class unaryoperation: - def __init__(self, operationname): - self.operationname = operationname - def __call__(self, f): - operation = getattr(f.space, self.operationname) - w_1 = f.valuestack.pop() - w_result = operation(w_1) - f.valuestack.push(w_result) - -class binaryoperation: - def __init__(self, operationname): - self.operationname = operationname - def __call__(self, f): - operation = getattr(f.space, self.operationname) - w_2 = f.valuestack.pop() - w_1 = f.valuestack.pop() - w_result = operation(w_1, w_2) - f.valuestack.push(w_result) - - -class PyInterpFrame(pyframe.PyFrame): - """A PyFrame that knows about interpretation of standard Python opcodes - minus the ones related to nested scopes.""" - - ### opcode dispatch ### - - # 'dispatch_table' is a class attribute: a list of functions. - # Currently, it is always created by setup_dispatch_table in pyopcode.py - # but it could be a custom table. 
- - def dispatch(self): - opcode = self.nextop() - fn = self.dispatch_table[opcode] - if fn.has_arg: - oparg = self.nextarg() - fn(self, oparg) - - else: - fn(self) - - def nextop(self): - c = self.code.co_code[self.next_instr] - self.next_instr += 1 - return ord(c) - - def nextarg(self): - lo = self.nextop() - hi = self.nextop() - return (hi<<8) + lo - - ### accessor functions ### - - def getlocalvarname(self, index): - return self.code.co_varnames[index] - - def getconstant(self, index): - return self.code.co_consts[index] - - def getname(self, index): - return self.code.co_names[index] - - - ################################################################ - ## Implementation of the "operational" opcodes - ## See also pyfastscope.py and pynestedscope.py for the rest. - ## - - # the 'self' argument of opcode implementations is called 'f' - # for historical reasons - - def NOP(f): - pass - - def LOAD_FAST(f, varindex): - # access a local variable directly - w_value = f.fastlocals_w[varindex] - if w_value is UNDEFINED: - varname = f.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" % varname - raise OperationError(f.space.w_UnboundLocalError, f.space.wrap(message)) - f.valuestack.push(w_value) - - def LOAD_CONST(f, constindex): - w_const = f.space.wrap(f.getconstant(constindex)) - f.valuestack.push(w_const) - - def STORE_FAST(f, varindex): - w_newvalue = f.valuestack.pop() - f.fastlocals_w[varindex] = w_newvalue - #except: - # print "exception: got index error" - # print " varindex:", varindex - # print " len(locals_w)", len(f.locals_w) - # import dis - # print dis.dis(f.code) - # print "co_varnames", f.code.co_varnames - # print "co_nlocals", f.code.co_nlocals - # raise - - def POP_TOP(f): - f.valuestack.pop() - - def ROT_TWO(f): - w_1 = f.valuestack.pop() - w_2 = f.valuestack.pop() - f.valuestack.push(w_1) - f.valuestack.push(w_2) - - def ROT_THREE(f): - w_1 = f.valuestack.pop() - w_2 = f.valuestack.pop() - w_3 = 
f.valuestack.pop() - f.valuestack.push(w_1) - f.valuestack.push(w_3) - f.valuestack.push(w_2) - - def ROT_FOUR(f): - w_1 = f.valuestack.pop() - w_2 = f.valuestack.pop() - w_3 = f.valuestack.pop() - w_4 = f.valuestack.pop() - f.valuestack.push(w_1) - f.valuestack.push(w_4) - f.valuestack.push(w_3) - f.valuestack.push(w_2) - - def DUP_TOP(f): - w_1 = f.valuestack.top() - f.valuestack.push(w_1) - - def DUP_TOPX(f, itemcount): - assert 1 <= itemcount <= 5, "limitation of the current interpreter" - for i in range(itemcount): - w_1 = f.valuestack.top(itemcount-1) - f.valuestack.push(w_1) - - UNARY_POSITIVE = unaryoperation("pos") - UNARY_NEGATIVE = unaryoperation("neg") - UNARY_NOT = unaryoperation("not_") - UNARY_CONVERT = unaryoperation("repr") - UNARY_INVERT = unaryoperation("invert") - - def BINARY_POWER(f): - w_2 = f.valuestack.pop() - w_1 = f.valuestack.pop() - w_result = f.space.pow(w_1, w_2, f.space.w_None) - f.valuestack.push(w_result) - - BINARY_MULTIPLY = binaryoperation("mul") - BINARY_TRUE_DIVIDE = binaryoperation("truediv") - BINARY_FLOOR_DIVIDE = binaryoperation("floordiv") - BINARY_DIVIDE = binaryoperation("div") - BINARY_MODULO = binaryoperation("mod") - BINARY_ADD = binaryoperation("add") - BINARY_SUBTRACT = binaryoperation("sub") - BINARY_SUBSCR = binaryoperation("getitem") - BINARY_LSHIFT = binaryoperation("lshift") - BINARY_RSHIFT = binaryoperation("rshift") - BINARY_AND = binaryoperation("and_") - BINARY_XOR = binaryoperation("xor") - BINARY_OR = binaryoperation("or_") - - def INPLACE_POWER(f): - w_2 = f.valuestack.pop() - w_1 = f.valuestack.pop() - w_result = f.space.inplace_pow(w_1, w_2, f.space.w_None) - f.valuestack.push(w_result) - - INPLACE_MULTIPLY = binaryoperation("inplace_mul") - INPLACE_TRUE_DIVIDE = binaryoperation("inplace_truediv") - INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_floordiv") - INPLACE_DIVIDE = binaryoperation("inplace_div") - INPLACE_MODULO = binaryoperation("inplace_mod") - INPLACE_ADD = binaryoperation("inplace_add") 
- INPLACE_SUBTRACT = binaryoperation("inplace_sub") - INPLACE_LSHIFT = binaryoperation("inplace_lshift") - INPLACE_RSHIFT = binaryoperation("inplace_rshift") - INPLACE_AND = binaryoperation("inplace_and") - INPLACE_XOR = binaryoperation("inplace_xor") - INPLACE_OR = binaryoperation("inplace_or") - - def slice(f, w_start, w_end): - w_slice = f.space.newslice(w_start, w_end, None) - w_obj = f.valuestack.pop() - w_result = f.space.getitem(w_obj, w_slice) - f.valuestack.push(w_result) - - def SLICE_0(f): - f.slice(None, None) - - def SLICE_1(f): - w_start = f.valuestack.pop() - f.slice(w_start, None) - - def SLICE_2(f): - w_end = f.valuestack.pop() - f.slice(None, w_end) - - def SLICE_3(f): - w_end = f.valuestack.pop() - w_start = f.valuestack.pop() - f.slice(w_start, w_end) - - def storeslice(f, w_start, w_end): - w_slice = f.space.newslice(w_start, w_end, None) - w_obj = f.valuestack.pop() - w_newvalue = f.valuestack.pop() - f.space.setitem(w_obj, w_slice, w_newvalue) - - def STORE_SLICE_0(f): - f.storeslice(None, None) - - def STORE_SLICE_1(f): - w_start = f.valuestack.pop() - f.storeslice(w_start, None) - - def STORE_SLICE_2(f): - w_end = f.valuestack.pop() - f.storeslice(None, w_end) - - def STORE_SLICE_3(f): - w_end = f.valuestack.pop() - w_start = f.valuestack.pop() - f.storeslice(w_start, w_end) - - def deleteslice(f, w_start, w_end): - w_slice = f.space.newslice(w_start, w_end, None) - w_obj = f.valuestack.pop() - f.space.delitem(w_obj, w_slice) - - def DELETE_SLICE_0(f): - f.deleteslice(None, None) - - def DELETE_SLICE_1(f): - w_start = f.valuestack.pop() - f.deleteslice(w_start, None) - - def DELETE_SLICE_2(f): - w_end = f.valuestack.pop() - f.deleteslice(None, w_end) - - def DELETE_SLICE_3(f): - w_end = f.valuestack.pop() - w_start = f.valuestack.pop() - f.deleteslice(w_start, w_end) - - def STORE_SUBSCR(f): - "obj[subscr] = newvalue" - w_subscr = f.valuestack.pop() - w_obj = f.valuestack.pop() - w_newvalue = f.valuestack.pop() - f.space.setitem(w_obj, 
w_subscr, w_newvalue) - - def DELETE_SUBSCR(f): - "del obj[subscr]" - w_subscr = f.valuestack.pop() - w_obj = f.valuestack.pop() - f.space.delitem(w_obj, w_subscr) - - def PRINT_EXPR(f): - w_expr = f.valuestack.pop() - print_expr(f.space, w_expr) - - def PRINT_ITEM_TO(f): - w_stream = f.valuestack.pop() - w_item = f.valuestack.pop() - if w_stream == f.space.w_None: - w_stream = sys_stdout(f.space) # grumble grumble special cases - print_item_to(f.space, w_item, w_stream) - - def PRINT_ITEM(f): - w_item = f.valuestack.pop() - print_item_to(f.space, w_item, sys_stdout(f.space)) - - def PRINT_NEWLINE_TO(f): - w_stream = f.valuestack.pop() - if w_stream == f.space.w_None: - w_stream = sys_stdout(f.space) # grumble grumble special cases - print_newline_to(f.space, w_stream) - - def PRINT_NEWLINE(f): - print_newline_to(f.space, sys_stdout(f.space)) - - def BREAK_LOOP(f): - raise pyframe.SBreakLoop - - def CONTINUE_LOOP(f, startofloop): - raise pyframe.SContinueLoop(startofloop) - - def RAISE_VARARGS(f, nbargs): - # we use the .app.py file to prepare the exception/value/traceback - # but not to actually raise it, because we cannot use the 'raise' - # statement to implement RAISE_VARARGS - if nbargs == 0: - operror = f.space.getexecutioncontext().sys_exc_info() - if operror is None: - raise OperationError(f.space.w_TypeError, - f.space.wrap("raise: no active exception to re-raise")) - # re-raise, no new traceback obj will be attached - raise pyframe.SApplicationException(operror) - w_value = w_traceback = f.space.w_None - if nbargs >= 3: w_traceback = f.valuestack.pop() - if nbargs >= 2: w_value = f.valuestack.pop() - if 1: w_type = f.valuestack.pop() - w_resulttuple = pyframe.normalize_exception(f.space, w_type, w_value, - w_traceback) - w_type, w_value, w_traceback = f.space.unpacktuple(w_resulttuple, 3) - tb = f.space.unwrap(w_traceback) - if tb is not None: - if not isinstance(tb,pytraceback.PyTraceback): - raise OperationError(f.space.w_TypeError, - 
f.space.wrap("raise: arg 3 must be a traceback or None")) - operror = OperationError(w_type,w_value,tb) - # re-raise, no new traceback obj will be attached - raise pyframe.SApplicationException(operror) - else: - # common-case - raise OperationError(w_type, w_value) - - def LOAD_LOCALS(f): - f.valuestack.push(f.w_locals) - - def RETURN_VALUE(f): - w_returnvalue = f.valuestack.pop() - raise pyframe.SReturnValue(w_returnvalue) - - def EXEC_STMT(f): - w_locals = f.valuestack.pop() - w_globals = f.valuestack.pop() - w_prog = f.valuestack.pop() - w_resulttuple = f.prepare_exec(w_prog, w_globals, w_locals) - w_prog, w_globals, w_locals = f.space.unpacktuple(w_resulttuple, 3) - - plain = f.space.is_true(f.space.is_(w_locals, f.w_locals)) - if plain: - w_locals = f.getdictscope() - pycode = f.space.unwrap(w_prog) - pycode.exec_code(f.space, w_globals, w_locals) - if plain: - f.setdictscope(w_locals) - - def app_prepare_exec(f, prog, globals, locals): - """Manipulate parameters to exec statement to (codeobject, dict, dict). 
- """ - # XXX INCOMPLETE - if (globals is None and locals is None and - isinstance(prog, tuple) and - (len(prog) == 2 or len(prog) == 3)): - globals = prog[1] - if len(prog) == 3: - locals = prog[2] - prog = prog[0] - if globals is None: - globals = f.f_globals - if locals is None: - locals = f.f_locals - if locals is None: - locals = globals - if not isinstance(globals, dict): - raise TypeError("exec: arg 2 must be a dictionary or None") - elif not globals.has_key('__builtins__'): - globals['__builtins__'] = f.f_builtins - if not isinstance(locals, dict): - raise TypeError("exec: arg 3 must be a dictionary or None") - # XXX - HACK to check for code object - co = compile('1','<string>','eval') - if isinstance(prog, type(co)): - return (prog, globals, locals) - if not isinstance(prog, str): - ## if not (isinstance(prog, types.StringTypes) or - ## isinstance(prog, types.FileType)): - raise TypeError("exec: arg 1 must be a string, file, or code object") - ## if isinstance(prog, types.FileType): - ## flags = 0 - ## ## XXX add in parent flag merging - ## co = compile(prog.read(),prog.name,'exec',flags,1) - ## return (co,globals,locals) - else: # prog is a string - flags = 0 - ## XXX add in parent flag merging - co = compile(prog,'<string>','exec',flags,1) - return (co, globals, locals) - - def POP_BLOCK(f): - block = f.blockstack.pop() - block.cleanup(f) # the block knows how to clean up the value stack - - def END_FINALLY(f): - # unlike CPython, when we reach this opcode the value stack has - # always been set up as follows (topmost first): - # [exception type or None] - # [exception value or None] - # [wrapped stack unroller ] - f.valuestack.pop() # ignore the exception type - f.valuestack.pop() # ignore the exception value - unroller = f.space.unwrap(f.valuestack.pop()) - if unroller is not None: - raise unroller # re-raise the unroller, if any - - def BUILD_CLASS(f): - w_methodsdict = f.valuestack.pop() - w_bases = f.valuestack.pop() - w_name = f.valuestack.pop() - 
w_metaclass = find_metaclass(f.space, w_bases, - w_methodsdict, f.w_globals) - w_newclass = f.space.call_function(w_metaclass, w_name, - w_bases, w_methodsdict) - f.valuestack.push(w_newclass) - - def STORE_NAME(f, varindex): - varname = f.getname(varindex) - w_varname = f.space.wrap(varname) - w_newvalue = f.valuestack.pop() - f.space.setitem(f.w_locals, w_varname, w_newvalue) - - def DELETE_NAME(f, varindex): - varname = f.getname(varindex) - w_varname = f.space.wrap(varname) - try: - f.space.delitem(f.w_locals, w_varname) - except OperationError, e: - # catch KeyErrors and turn them into NameErrors - if not e.match(f.space, f.space.w_KeyError): - raise - message = "name '%s' is not defined" % varname - raise OperationError(f.space.w_NameError, f.space.wrap(message)) - - def UNPACK_SEQUENCE(f, itemcount): - w_iterable = f.valuestack.pop() - try: - items = f.space.unpackiterable(w_iterable, itemcount) - except ValueError, e: - raise OperationError(f.space.w_ValueError, f.space.wrap(str(e))) - items.reverse() - for item in items: - f.valuestack.push(item) - - def STORE_ATTR(f, nameindex): - "obj.attributename = newvalue" - attributename = f.getname(nameindex) - w_attributename = f.space.wrap(attributename) - w_obj = f.valuestack.pop() - w_newvalue = f.valuestack.pop() - f.space.setattr(w_obj, w_attributename, w_newvalue) - - def DELETE_ATTR(f, nameindex): - "del obj.attributename" - attributename = f.getname(nameindex) - w_attributename = f.space.wrap(attributename) - w_obj = f.valuestack.pop() - f.space.delattr(w_obj, w_attributename) - - def STORE_GLOBAL(f, nameindex): - varname = f.getname(nameindex) - w_varname = f.space.wrap(varname) - w_newvalue = f.valuestack.pop() - f.space.setitem(f.w_globals, w_varname, w_newvalue) - - def DELETE_GLOBAL(f, nameindex): - varname = f.getname(nameindex) - w_varname = f.space.wrap(varname) - f.space.delitem(f.w_globals, w_varname) - - def LOAD_NAME(f, nameindex): - varname = f.getname(nameindex) - w_varname = 
f.space.wrap(varname) - - if f.w_globals is f.w_locals: - try_list_w = [f.w_globals, f.w_builtins] - - else: - try_list_w = [f.w_locals, f.w_globals, f.w_builtins] - - w_value = None - for wrapped in try_list_w: - try: - w_value = f.space.getitem(wrapped, w_varname) - f.valuestack.push(w_value) - return - - except OperationError, e: - if not e.match(f.space, f.space.w_KeyError): - raise - message = "name '%s' is not defined" % varname - w_exc_type = f.space.w_NameError - w_exc_value = f.space.wrap(message) - raise OperationError(w_exc_type, w_exc_value) - - - # XXX the implementation can be pushed back into app-space as an - # when exception handling begins to behave itself. For now, it - # was getting on my nerves -- mwh - # w_value = f.load_name(w_varname) - # f.valuestack.push(w_value) - - def LOAD_GLOBAL(f, nameindex): - assert f.w_globals is not None - varname = f.getname(nameindex) - w_varname = f.space.wrap(varname) - try: - w_value = f.space.getitem(f.w_globals, w_varname) - except OperationError, e: - # catch KeyErrors - if not e.match(f.space, f.space.w_KeyError): - raise - # we got a KeyError, now look in the built-ins - try: - w_value = f.space.getitem(f.w_builtins, w_varname) - except OperationError, e: - # catch KeyErrors again - if not e.match(f.space, f.space.w_KeyError): - raise - message = "global name '%s' is not defined" % varname - w_exc_type = f.space.w_NameError - w_exc_value = f.space.wrap(message) - raise OperationError(w_exc_type, w_exc_value) - f.valuestack.push(w_value) - - def DELETE_FAST(f, varindex): - if f.fastlocals_w[varindex] is UNDEFINED: - varname = f.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" % varname - raise OperationError(f.space.w_UnboundLocalError, f.space.wrap(message)) - f.fastlocals_w[varindex] = UNDEFINED - - def BUILD_TUPLE(f, itemcount): - items = [f.valuestack.pop() for i in range(itemcount)] - items.reverse() - w_tuple = f.space.newtuple(items) - 
f.valuestack.push(w_tuple) - - def BUILD_LIST(f, itemcount): - items = [f.valuestack.pop() for i in range(itemcount)] - items.reverse() - w_list = f.space.newlist(items) - f.valuestack.push(w_list) - - def BUILD_MAP(f, zero): - if zero != 0: - raise pyframe.BytecodeCorruption - w_dict = f.space.newdict([]) - f.valuestack.push(w_dict) - - def LOAD_ATTR(f, nameindex): - "obj.attributename" - attributename = f.getname(nameindex) - w_attributename = f.space.wrap(attributename) - w_obj = f.valuestack.pop() - w_value = f.space.getattr(w_obj, w_attributename) - f.valuestack.push(w_value) - - def cmp_lt(f, w_1, w_2): return f.space.lt(w_1, w_2) - def cmp_le(f, w_1, w_2): return f.space.le(w_1, w_2) - def cmp_eq(f, w_1, w_2): return f.space.eq(w_1, w_2) - def cmp_ne(f, w_1, w_2): return f.space.ne(w_1, w_2) - def cmp_gt(f, w_1, w_2): return f.space.gt(w_1, w_2) - def cmp_ge(f, w_1, w_2): return f.space.ge(w_1, w_2) - - def cmp_in(f, w_1, w_2): - return f.space.contains(w_2, w_1) - def cmp_not_in(f, w_1, w_2): - return f.space.not_(f.space.contains(w_2, w_1)) - def cmp_is(f, w_1, w_2): - return f.space.is_(w_1, w_2) - def cmp_is_not(f, w_1, w_2): - return f.space.not_(f.space.is_(w_1, w_2)) - def cmp_exc_match(f, w_1, w_2): - return f.space.newbool(f.space.exception_match(w_1, w_2)) - - compare_dispatch_table = { - 0: cmp_lt, # "<" - 1: cmp_le, # "<=" - 2: cmp_eq, # "==" - 3: cmp_ne, # "!=" - 4: cmp_gt, # ">" - 5: cmp_ge, # ">=" - 6: cmp_in, - 7: cmp_not_in, - 8: cmp_is, - 9: cmp_is_not, - 10: cmp_exc_match, - } - def COMPARE_OP(f, testnum): - w_2 = f.valuestack.pop() - w_1 = f.valuestack.pop() - try: - testfn = f.compare_dispatch_table[testnum] - except KeyError: - raise pyframe.BytecodeCorruption, "bad COMPARE_OP oparg" - w_result = testfn(f, w_1, w_2) - f.valuestack.push(w_result) - - def IMPORT_NAME(f, nameindex): - space = f.space - modulename = f.getname(nameindex) - w_fromlist = f.valuestack.pop() - try: - w_import = space.getitem(f.w_builtins, 
space.wrap("__import__")) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - raise OperationError(space.w_ImportError, - space.wrap("__import__ not found")) - w_locals = f.w_locals - if w_locals is None: # CPython does this - w_locals = space.w_None - w_obj = space.call_function(w_import, space.wrap(modulename), - f.w_globals, w_locals, w_fromlist) - f.valuestack.push(w_obj) - - def IMPORT_STAR(f): - w_module = f.valuestack.pop() - w_locals = f.getdictscope() - import_all_from(f.space, w_module, w_locals) - f.setdictscope(w_locals) - - def IMPORT_FROM(f, nameindex): - name = f.getname(nameindex) - w_name = f.space.wrap(name) - w_module = f.valuestack.top() - try: - w_obj = f.space.getattr(w_module, w_name) - except OperationError, e: - if not e.match(f.space, f.space.w_AttributeError): - raise - raise OperationError(f.space.w_ImportError, - f.space.wrap("cannot import name '%s'" % name)) - f.valuestack.push(w_obj) - - def JUMP_FORWARD(f, stepby): - f.next_instr += stepby - - def JUMP_IF_FALSE(f, stepby): - w_cond = f.valuestack.top() - if not f.space.is_true(w_cond): - f.next_instr += stepby - - def JUMP_IF_TRUE(f, stepby): - w_cond = f.valuestack.top() - if f.space.is_true(w_cond): - f.next_instr += stepby - - def JUMP_ABSOLUTE(f, jumpto): - f.next_instr = jumpto - - def GET_ITER(f): - w_iterable = f.valuestack.pop() - w_iterator = f.space.iter(w_iterable) - f.valuestack.push(w_iterator) - - def FOR_ITER(f, jumpby): - w_iterator = f.valuestack.top() - try: - w_nextitem = f.space.next(w_iterator) - except OperationError, e: - if not e.match(f.space, f.space.w_StopIteration): - raise - # iterator exhausted - f.valuestack.pop() - f.next_instr += jumpby - else: - f.valuestack.push(w_nextitem) - - def FOR_LOOP(f, oparg): - raise pyframe.BytecodeCorruption, "old opcode, no longer in use" - - def SETUP_LOOP(f, offsettoend): - block = pyframe.LoopBlock(f, f.next_instr + offsettoend) - f.blockstack.push(block) - - def SETUP_EXCEPT(f, 
offsettoend): - block = pyframe.ExceptBlock(f, f.next_instr + offsettoend) - f.blockstack.push(block) - - def SETUP_FINALLY(f, offsettoend): - block = pyframe.FinallyBlock(f, f.next_instr + offsettoend) - f.blockstack.push(block) - - def CALL_FUNCTION(f, oparg, w_star=None, w_starstar=None): - n_arguments = oparg & 0xff - n_keywords = (oparg>>8) & 0xff - keywords = {} - for i in range(n_keywords): - w_value = f.valuestack.pop() - w_key = f.valuestack.pop() - key = f.space.unwrap(w_key) # XXX type check: str - keywords[key] = w_value - arguments = [f.valuestack.pop() for i in range(n_arguments)] - arguments.reverse() - args = Arguments(f.space, arguments, keywords, w_star, w_starstar) - w_function = f.valuestack.pop() - w_result = f.space.call_args(w_function, args) - f.valuestack.push(w_result) - - def CALL_FUNCTION_VAR(f, oparg): - w_varargs = f.valuestack.pop() - f.CALL_FUNCTION(oparg, w_varargs) - - def CALL_FUNCTION_KW(f, oparg): - w_varkw = f.valuestack.pop() - f.CALL_FUNCTION(oparg, None, w_varkw) - - def CALL_FUNCTION_VAR_KW(f, oparg): - w_varkw = f.valuestack.pop() - w_varargs = f.valuestack.pop() - f.CALL_FUNCTION(oparg, w_varargs, w_varkw) - - def MAKE_FUNCTION(f, numdefaults): - w_codeobj = f.valuestack.pop() - codeobj = f.space.unwrap(w_codeobj) - defaultarguments = [f.valuestack.pop() for i in range(numdefaults)] - defaultarguments.reverse() - fn = function.Function(f.space, codeobj, f.w_globals, defaultarguments) - f.valuestack.push(f.space.wrap(fn)) - - def BUILD_SLICE(f, numargs): - if numargs == 3: - w_step = f.valuestack.pop() - elif numargs == 2: - w_step = None - else: - raise pyframe.BytecodeCorruption - w_end = f.valuestack.pop() - w_start = f.valuestack.pop() - w_slice = f.space.newslice(w_start, w_end, w_step) - f.valuestack.push(w_slice) - - def LIST_APPEND(f): - w = f.valuestack.pop() - v = f.valuestack.pop() - f.space.call_method(v, 'append', w) - - def SET_LINENO(f, lineno): - pass - - def EXTENDED_ARG(f, oparg): - opcode = f.nextop() - 
oparg = oparg<<16 | f.nextarg() - fn = f.dispatch_table[opcode] - if not fn.has_arg: - raise pyframe.BytecodeCorruption - fn(f, oparg) - - def MISSING_OPCODE(f, oparg=None): - raise pyframe.BytecodeCorruption, "unknown opcode" - - ### dispatch_table ### - - # 'dispatch_table' is a class attribute: a list of functions - # it is created by 'cls.setup_dispatch_table()'. - - __metaclass__ = InitializedClass - def __initclass__(cls): - "NOT_RPYTHON" - # create the 'cls.dispatch_table' attribute - import dis - dispatch_table = [] - missing_opcode = cls.MISSING_OPCODE - for i in range(256): - opname = dis.opname[i].replace('+', '_') - fn = getattr(cls, opname, missing_opcode) - fn = getattr(fn, 'im_func',fn) - fn.has_arg = i >= dis.HAVE_ARGUMENT - #if fn is missing_opcode and not opname.startswith('<') and i>0: - # import warnings - # warnings.warn("* Warning, missing opcode %s" % opname) - dispatch_table.append(fn) - cls.dispatch_table = dispatch_table - - - gateway.importall(locals()) # app_xxx() -> xxx() - - -### helpers written at the application-level ### -# Some of these functions are expected to be generally useful if other -# parts of the code needs to do the same thing as a non-trivial opcode, -# like finding out which metaclass a new class should have. -# This is why they are not methods of PyInterpFrame. -# There are also a couple of helpers that are methods, defined in the -# class above. 
- -def app_print_expr(x): - try: - displayhook = sys.displayhook - except AttributeError: - raise RuntimeError("lost sys.displayhook") - displayhook(x) - -def app_file_softspace(file, newflag): - try: - softspace = file.softspace - except AttributeError: - softspace = 0 - try: - file.softspace = newflag - except AttributeError: - pass - return softspace - -def app_sys_stdout(): - try: - return sys.stdout - except AttributeError: - raise RuntimeError("lost sys.stdout") - -def app_print_item_to(x, stream): - if file_softspace(stream, False): - stream.write(" ") - stream.write(str(x)) - # add a softspace unless we just printed a string which ends in a '\t' - # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and len(x) and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling - file_softspace(stream, True) - -def app_print_newline_to(stream): - stream.write("\n") - file_softspace(stream, False) - -def app_find_metaclass(bases, namespace, globals): - if '__metaclass__' in namespace: - return namespace['__metaclass__'] - elif len(bases) > 0: - base = bases[0] - if hasattr(base, '__class__'): - return base.__class__ - else: - return type(base) - elif '__metaclass__' in globals: - return globals['__metaclass__'] - else: - return type - -def app_import_all_from(module, into_locals): - try: - all = module.__all__ - except AttributeError: - try: - dict = module.__dict__ - except AttributeError: - raise ImportError("from-import-* object has no __dict__ " - "and no __all__") - all = dict.keys() - skip_leading_underscores = True - else: - skip_leading_underscores = False - for name in all: - if skip_leading_underscores and name[0]=='_': - continue - into_locals[name] = getattr(module, name) - - -gateway.importall(globals()) # app_xxx() -> xxx() diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py deleted file mode 100644 index f20e1ab2a8..0000000000 --- a/pypy/interpreter/pytraceback.py +++ /dev/null 
@@ -1,37 +0,0 @@ -from pypy.interpreter import baseobjspace - - -class PyTraceback(baseobjspace.Wrappable): - """Traceback object - - Public fields: - * 'tb_frame' - * 'tb_lasti' - * 'tb_lineno' - * 'tb_next' - """ - - def __init__(self, space, frame, lasti, lineno, next): - self.space = space - self.frame = frame - self.lasti = lasti - self.lineno = lineno - self.next = next - - -def record_application_traceback(space, operror, frame, last_instruction): - lineno = offset2lineno(frame.code, last_instruction) - tb = operror.application_traceback - tb = PyTraceback(space, frame, last_instruction, lineno, tb) - operror.application_traceback = tb - -def offset2lineno(c, stopat): - tab = c.co_lnotab - line = c.co_firstlineno - addr = 0 - for i in range(0, len(tab), 2): - addr = addr + ord(tab[i]) - if addr > stopat: - break - line = line + ord(tab[i+1]) - return line diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py deleted file mode 100644 index 8c2515dc8c..0000000000 --- a/pypy/interpreter/special.py +++ /dev/null @@ -1,15 +0,0 @@ - -from pypy.interpreter.baseobjspace import Wrappable - -class Ellipsis(Wrappable): - def __init__(self, space): - self.space = space - def descr__repr__(self): - return self.space.wrap('Ellipsis') - -class NotImplemented(Wrappable): - def __init__(self, space): - self.space = space - def descr__repr__(self): - return self.space.wrap('NotImplemented') - diff --git a/pypy/interpreter/test/__init__.py b/pypy/interpreter/test/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/interpreter/test/__init__.py +++ /dev/null diff --git a/pypy/interpreter/test/autopath.py b/pypy/interpreter/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/interpreter/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. 
-The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/interpreter/test/foointerp.py b/pypy/interpreter/test/foointerp.py deleted file mode 100644 index 12a4fefec1..0000000000 --- a/pypy/interpreter/test/foointerp.py +++ /dev/null @@ -1,9 +0,0 @@ -w_foo2 = space.wrap("hello") -foo2 = "never mind" # should be hidden by w_foo2 - -def foobuilder(w_name): - name = space.unwrap(w_name) - return space.wrap("hi, %s!" 
% name) - -from __applevel__ import bar -fortytwo = bar(space.wrap(6), space.wrap(7)) diff --git a/pypy/interpreter/test/foomodule.py b/pypy/interpreter/test/foomodule.py deleted file mode 100644 index 38320e67f8..0000000000 --- a/pypy/interpreter/test/foomodule.py +++ /dev/null @@ -1,13 +0,0 @@ - -__interplevel__exec("w_foo = space.w_Ellipsis") -foo1 = __interplevel__eval("w_foo") # foo1 is Ellipsis -from __interplevel__ import foo # Ellipsis too - -def bar(a, b): # visible from interp-level code - return a * b - -__interplevel__execfile("foointerp.py") # defines w_foo2 and foobuilder() - -from __interplevel__ import foo2 # from w_foo2, gives "hello" -from __interplevel__ import foobuilder -foo3 = foobuilder("guido") # gives "hi, guido!" diff --git a/pypy/interpreter/test/hello_world.py b/pypy/interpreter/test/hello_world.py deleted file mode 100644 index 6407648b45..0000000000 --- a/pypy/interpreter/test/hello_world.py +++ /dev/null @@ -1,9 +0,0 @@ - -def main(aStr): - print len(aStr) - -map(main, ["hello world", "good bye"]) -apply(main, ("apply works, too",)) -apply(main, (), {"aStr": "it really works"}) - -print chr(65) diff --git a/pypy/interpreter/test/s1.py b/pypy/interpreter/test/s1.py deleted file mode 100644 index b0233a7855..0000000000 --- a/pypy/interpreter/test/s1.py +++ /dev/null @@ -1,18 +0,0 @@ - -### a trivial program to test strings, lists, functions and methods ### -## tiny change wrt goal so far needed: explicit parameter to str.split - -def addstr(s1,s2): - return s1 + s2 - -str = "an interesting string" -str2 = 'another::string::xxx::y:aa' -str3 = addstr(str,str2) -arr = [] -for word in str.split(' '): - if word in str2.split('::'): - arr.append(word) -print ''.join(arr) -print "str + str2 = ", str3 - - diff --git a/pypy/interpreter/test/test_class.py b/pypy/interpreter/test/test_class.py deleted file mode 100644 index d4510a78a1..0000000000 --- a/pypy/interpreter/test/test_class.py +++ /dev/null @@ -1,134 +0,0 @@ - -class AppTestClass: - - 
def test_class(self): - class C: - pass - assert C.__class__ == type - c = C() - assert c.__class__ == C - - def test_metaclass_explicit(self): - class M(type): - pass - class C: - __metaclass__ = M - assert C.__class__ == M - c = C() - assert c.__class__ == C - - def test_metaclass_inherited(self): - class M(type): - pass - class B: - __metaclass__ = M - class C(B): - pass - assert C.__class__ == M - c = C() - assert c.__class__ == C - - def test_metaclass_global(self): - d = {} - metatest_text = """ -class M(type): - pass - -__metaclass__ = M - -class C: - pass -""" - exec metatest_text in d - C = d['C'] - M = d['M'] - assert C.__class__ == M - c = C() - assert c.__class__ == C - - def test_method(self): - class C: - def meth(self): - return 1 - c = C() - assert c.meth() == 1 - - def test_method_exc(self): - class C: - def meth(self): - raise RuntimeError - c = C() - raises(RuntimeError, c.meth) - - def test_class_attr(self): - class C: - a = 42 - c = C() - assert c.a == 42 - assert C.a == 42 - - def test_class_attr_inherited(self): - class C: - a = 42 - class D(C): - pass - d = D() - assert d.a == 42 - assert D.a == 42 - - def test___new__(self): - class A(object): - pass - assert isinstance(A(), A) - assert isinstance(object.__new__(A), A) - assert isinstance(A.__new__(A), A) - - def test_int_subclass(self): - class R(int): - pass - x = R(5) - assert type(x) == R - assert x == 5 - assert type(int(x)) == int - assert int(x) == 5 - - def test_long_subclass(self): - class R(long): - pass - x = R(5L) - assert type(x) == R - assert x == 5L - assert type(long(x)) == long - assert long(x) == 5L - - def test_float_subclass(self): - class R(float): - pass - x = R(5.5) - assert type(x) == R - assert x == 5.5 - assert type(float(x)) == float - assert float(x) == 5.5 - - def test_meth_doc(self): - class C: - def meth_no_doc(self): - pass - def meth_doc(self): - """this is a docstring""" - pass - c = C() - assert C.meth_no_doc.__doc__ == None - assert c.meth_no_doc.__doc__ 
== None - assert C.meth_doc.__doc__ == """this is a docstring""" - assert c.meth_doc.__doc__ == """this is a docstring""" - - def test_getattribute(self): - class C: - def __getattribute__(self, attr): - if attr == 'one': - return 'two' - return super(C, self).__getattribute__(attr) - c = C() - assert c.one == "two" - raises(AttributeError, getattr, c, "two") diff --git a/pypy/interpreter/test/test_code.py b/pypy/interpreter/test/test_code.py deleted file mode 100644 index 7fc69c7965..0000000000 --- a/pypy/interpreter/test/test_code.py +++ /dev/null @@ -1,96 +0,0 @@ - -import autopath -import unittest - - -class AppTestCodeIntrospection: - - def test_attributes(self): - def f(): pass - def g(x, *y, **z): "docstring" - assert hasattr(f.func_code, 'co_code') - assert hasattr(g.func_code, 'co_code') - - testcases = [ - (f.func_code, {'co_name': 'f', - 'co_names': (), - 'co_varnames': (), - 'co_argcount': 0, - 'co_consts': (None,) - }), - (g.func_code, {'co_name': 'g', - 'co_names': (), - 'co_varnames': ('x', 'y', 'z'), - 'co_argcount': 1, - 'co_consts': ("docstring", None), - }), - ] - - import sys - if sys.pypy_objspaceclass != 'TrivialObjSpace': - testcases += [ - (abs.func_code, {'co_name': 'abs', - 'co_varnames': ('val',), - 'co_argcount': 1, - 'co_flags': 0, - 'co_consts': ("abs(number) -> number\n\nReturn the absolute value of the argument.",), - }), - (object.__init__.im_func.func_code, - {#'co_name': '__init__', XXX getting descr__init__ - 'co_varnames': ('obj', 'args', 'keywords'), - 'co_argcount': 1, - 'co_flags': 0x000C, # VARARGS|VARKEYWORDS - }), - ] - - # in PyPy, built-in functions have code objects - # that emulate some attributes - for code, expected in testcases: - assert hasattr(code, '__class__') - assert not hasattr(code,'__dict__') - for key, value in expected.items(): - assert getattr(code, key) == value - - def test_code(self): - import sys - try: - import new - except ImportError: - skip("could not import new module") - if 
sys.pypy_objspaceclass == 'TrivialObjSpace': - return # skip - codestr = "global c\na = 1\nb = 2\nc = a + b\n" - ccode = compile(codestr, '<string>', 'exec') - co = new.code(ccode.co_argcount, - ccode.co_nlocals, - ccode.co_stacksize, - ccode.co_flags, - ccode.co_code, - ccode.co_consts, - ccode.co_names, - ccode.co_varnames, - ccode.co_filename, - ccode.co_name, - ccode.co_firstlineno, - ccode.co_lnotab, - ccode.co_freevars, - ccode.co_cellvars) - d = {} - exec co in d - assert d['c'] == 3 - # test backwards-compatibility version with no freevars or cellvars - co = new.code(ccode.co_argcount, - ccode.co_nlocals, - ccode.co_stacksize, - ccode.co_flags, - ccode.co_code, - ccode.co_consts, - ccode.co_names, - ccode.co_varnames, - ccode.co_filename, - ccode.co_name, - ccode.co_firstlineno, - ccode.co_lnotab) - d = {} - exec co in d - assert d['c'] == 3 diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py deleted file mode 100644 index 4b2531da0c..0000000000 --- a/pypy/interpreter/test/test_eval.py +++ /dev/null @@ -1,68 +0,0 @@ - -import autopath -from pypy.interpreter.eval import Frame, UNDEFINED -from pypy.interpreter.pycode import PyCode - - -class TestFrame: - def setup_method(self, method): - def c(x, y, *args): - pass - code = PyCode()._from_code(c.func_code) - - class ConcreteFastscopeFrame(Frame): - - def __init__(self, space, code, numlocals): - Frame.__init__(self, space, code, numlocals=numlocals) - self.fastlocals_w = [UNDEFINED] * self.numlocals - - def setfastscope(self, scope_w): - self.fastlocals_w = scope_w - - def getfastscope(self): - return self.fastlocals_w - - self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) - - - def test_fast2locals(self): - space = self.space - w = space.wrap - self.f.fast2locals() - assert space.eq_w(self.f.w_locals, self.space.newdict([])) - - self.f.fastlocals_w[0] = w(5) - self.f.fast2locals() - assert space.eq_w(self.f.w_locals, self.space.newdict([ - (w('x'), w(5))])) - - 
self.f.fastlocals_w[2] = w(7) - self.f.fast2locals() - assert space.eq_w(self.f.w_locals, self.space.newdict([ - (w('x'), w(5)), - (w('args'), w(7))])) - - def sameList(self, l1, l2): - assert len(l1) == len(l2) - for w_1, w_2 in zip(l1, l2): - assert not ((w_1 is UNDEFINED) != (w_2 is UNDEFINED)) - if w_1 is not UNDEFINED: - assert self.space.eq_w(w_1, w_2) - - def test_locals2fast(self): - w = self.space.wrap - self.f.w_locals = self.space.newdict([]) - self.f.locals2fast() - self.sameList(self.f.fastlocals_w, [UNDEFINED]*5) - - self.f.w_locals = self.space.newdict([ - (w('x'), w(5))]) - self.f.locals2fast() - self.sameList(self.f.fastlocals_w, [w(5)] + [UNDEFINED]*4) - - self.f.w_locals = self.space.newdict([ - (w('x'), w(5)), - (w('args'), w(7))]) - self.f.locals2fast() - self.sameList(self.f.fastlocals_w, [w(5), UNDEFINED, w(7), - UNDEFINED, UNDEFINED]) diff --git a/pypy/interpreter/test/test_exceptcomp.py b/pypy/interpreter/test/test_exceptcomp.py deleted file mode 100644 index 00e177b215..0000000000 --- a/pypy/interpreter/test/test_exceptcomp.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Test comparisons of Exceptions in except clauses. - -New for PyPy - Could be incorporated into CPython regression tests. -""" -import autopath - -class AppTestExceptionComp: - -### XXX - String exceptions depreciated? 
-## def test_string(self): -## string = "string" -## try: -## raise string -## except string: -## pass -## except: -## self.fail("Identical string exceptions do not match.") -## -## def test_stringfail(self): -## string1 = "string1" -## string1_ = "string" + "1" -## assert string1 is not string1_ -## try: -## raise string1 -## except "string2": -## self.fail("Different string exceptions match.") -## except string1_: -## self.fail("Non Identical string exceptions match.") -## except string1: -## pass -## except: -## self.fail("Unknown value for variable raise.") - - - def test_exception(self): - try: - raise TypeError, "nothing" - except TypeError: - pass - except: - self.fail("Identical exceptions do not match.") - - def test_exceptionfail(self): - try: - raise TypeError, "nothing" - except KeyError: - self.fail("Different exceptions match.") - except TypeError: - pass - except: - self.fail("Unanticipated value for exception raise.") - - - def test_called(self): - try: - raise SyntaxError("Invalid") - except SyntaxError: - pass - except: - self.fail("Instantiated exception does not match parent class.") - - def test_calledfail(self): - try: - raise SyntaxError("Invalid") - except ZeroDivisionError: - self.fail("Instantiated exception matches different parent class.") - except SyntaxError: - pass - except: - self.fail("Unanticpated value for exception raise.") - - - def test_userclass(self): - class UserExcept(Exception): - pass - try: - raise UserExcept, "nothing" - except UserExcept: - pass - except: - self.fail("User defined class exceptions do not match.") - - def test_subclass(self): - try: - raise KeyError("key") - except LookupError: - pass - except: - self.fail("Exception does not match parent class.") - - def test_deepsubclass(self): - try: - raise FloatingPointError("1.2r") - except Exception: - pass - except: - self.fail("Exception does not match grandparent class.") - - def test_tuple(self): - try: - raise ArithmeticError("2+jack") - except 
(ZeroDivisionError, ArithmeticError): - pass - except: - self.fail("Exception does not match self in tuple.") - - def test_parenttuple(self): - try: - raise ZeroDivisionError("0") - except (StandardError, SystemExit): - pass - except: - self.fail("Exception does not match parent in tuple.") - - def test_nestedtuples(self): - try: - raise AssertionError("0") - except (SystemExit, (KeyboardInterrupt, AssertionError)): - pass - except: - self.fail("Exception does not match self in nested tuple.") - - def test_deeptuples(self): - try: - raise IOError - except (FloatingPointError,(OSError, - (SyntaxError,IOError,ZeroDivisionError)), - (MemoryError, NotImplementedError)): - pass - except: - self.fail("Exception does not match self in deeply nested tuple.") diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py deleted file mode 100644 index b73380d0b5..0000000000 --- a/pypy/interpreter/test/test_exec.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Test the exec statement functionality. - -New for PyPy - Could be incorporated into CPython regression tests. 
-""" -import autopath - -class AppTestExecStmt: - - def test_string(self): - g = {} - l = {} - exec "a = 3" in g, l - assert not l['a'] != 3 - - def test_localfill(self): - g = {} - exec "a = 3" in g - assert not g['a'] != 3 - - def test_builtinsupply(self): - g = {} - exec "pass" in g - assert g.has_key('__builtins__') - - def test_invalidglobal(self): - def f(): - exec 'pass' in 1 - raises(TypeError,f) - - def test_invalidlocal(self): - def f(): - exec 'pass' in {}, 2 - raises(TypeError,f) - - def test_codeobject(self): - co = compile("a = 3", '<string>', 'exec') - g = {} - l = {} - exec co in g, l - assert not l['a'] != 3 - -## # Commented out as PyPy give errors using open() -## # ["Not availible in restricted mode"] -## def test_file(self): -## fo = open("test_exec.py", 'r') -## g = {} -## exec fo in g -## self.failUnless(g.has_key('TestExecStmt')) - - def test_implicit(self): - a = 4 - exec "a = 3" - assert not a !=3 - - def test_tuplelocals(self): - g = {} - l = {} - exec ("a = 3", g, l) - assert not l['a'] != 3 - - def test_tupleglobals(self): - g = {} - exec ("a = 3", g) - assert not g['a'] != 3 - - def test_exceptionfallthrough(self): - def f(): - exec 'raise TypeError' in {} - raises(TypeError,f) diff --git a/pypy/interpreter/test/test_extmodule.py b/pypy/interpreter/test/test_extmodule.py deleted file mode 100644 index cd8f35f4e8..0000000000 --- a/pypy/interpreter/test/test_extmodule.py +++ /dev/null @@ -1,25 +0,0 @@ - -import autopath -import os -from pypy.interpreter.extmodule import BuiltinModule - -class TestBuiltinModule: - def test_foomodule(self): - space = self.space - sourcefile = os.path.join(autopath.this_dir, 'foomodule.py') - m = BuiltinModule(space, 'foo', sourcefile=sourcefile) - w = space.wrap - w_m = space.wrap(m) - self.space.eq_w(space.getattr(w_m, w('__name__')), w('foo')) - self.space.eq_w(space.getattr(w_m, w('__file__')), w(sourcefile)) - # check app-level definitions - self.space.eq_w(m.w_foo, space.w_Ellipsis) - 
self.space.eq_w(space.getattr(w_m, w('foo1')), space.w_Ellipsis) - self.space.eq_w(space.getattr(w_m, w('foo')), space.w_Ellipsis) - self.space.eq_w(space.call_method(w_m, 'bar', w(4), w(3)), w(12)) - self.space.eq_w(space.getattr(w_m, w('foo2')), w('hello')) - self.space.eq_w(space.getattr(w_m, w('foo3')), w('hi, guido!')) - # check interp-level definitions - self.space.eq_w(m.w_foo2, w('hello')) - self.space.eq_w(m.foobuilder(w('xyzzy')), w('hi, xyzzy!')) - self.space.eq_w(m.fortytwo, w(42)) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py deleted file mode 100644 index 99051821a1..0000000000 --- a/pypy/interpreter/test/test_function.py +++ /dev/null @@ -1,231 +0,0 @@ - -import autopath -import unittest -from pypy.interpreter.function import Function, Method -from pypy.interpreter.pycode import PyCode -from pypy.interpreter.argument import Arguments - - -class AppTestFunctionIntrospection: - def test_attributes(self): - def f(): pass - assert hasattr(f, 'func_code') - assert f.func_defaults == None - assert f.func_dict == {} - assert type(f.func_globals) == dict - #self.assertEquals(f.func_closure, None) XXX - assert f.func_doc == None - assert f.func_name == 'f' - - def test_code_is_ok(self): - def f(): pass - assert not hasattr(f.func_code, '__dict__') - - def test_underunder_attributes(self): - def f(): pass - assert f.__name__ == 'f' - assert f.__doc__ == None - assert f.__name__ == f.func_name - assert f.__doc__ == f.func_doc - assert f.__dict__ is f.func_dict - assert hasattr(f, '__class__') - - def test_write_doc(self): - def f(): "hello" - assert f.__doc__ == 'hello' - f.__doc__ = 'good bye' - assert f.__doc__ == 'good bye' - del f.__doc__ - assert f.__doc__ == None - - def test_write_func_doc(self): - def f(): "hello" - assert f.func_doc == 'hello' - f.func_doc = 'good bye' - assert f.func_doc == 'good bye' - del f.func_doc - assert f.func_doc == None - -class AppTestFunction: - def test_simple_call(self): - 
def func(arg1, arg2): - return arg1, arg2 - res = func(23,42) - assert res[0] == 23 - assert res[1] == 42 - - def test_simple_varargs(self): - def func(arg1, *args): - return arg1, args - res = func(23,42) - assert res[0] == 23 - assert res[1] == (42,) - - def test_simple_kwargs(self): - def func(arg1, **kwargs): - return arg1, kwargs - res = func(23, value=42) - assert res[0] == 23 - assert res[1] == {'value': 42} - - def test_kwargs_sets_wrong_positional_raises(self): - def func(arg1): - pass - raises(TypeError, func, arg2=23) - - def test_kwargs_sets_positional(self): - def func(arg1): - return arg1 - res = func(arg1=42) - assert res == 42 - - def test_kwargs_sets_positional_mixed(self): - def func(arg1, **kw): - return arg1, kw - res = func(arg1=42, something=23) - assert res[0] == 42 - assert res[1] == {'something': 23} - - def test_kwargs_sets_positional_mixed(self): - def func(arg1, **kw): - return arg1, kw - res = func(arg1=42, something=23) - assert res[0] == 42 - assert res[1] == {'something': 23} - - def test_kwargs_sets_positional_twice(self): - def func(arg1, **kw): - return arg1, kw - raises( - TypeError, func, 42, {'arg1': 23}) - - def test_default_arg(self): - def func(arg1,arg2=42): - return arg1, arg2 - res = func(arg1=23) - assert res[0] == 23 - assert res[1] == 42 - - def test_defaults_keyword_overrides(self): - def func(arg1=42, arg2=23): - return arg1, arg2 - res = func(arg1=23) - assert res[0] == 23 - assert res[1] == 23 - - def test_defaults_keyword_override_but_leaves_empty_positional(self): - def func(arg1,arg2=42): - return arg1, arg2 - raises(TypeError, func, arg2=23) - - def test_kwargs_disallows_same_name_twice(self): - def func(arg1, **kw): - return arg1, kw - raises(TypeError, func, 42, **{'arg1': 23}) - - def test_kwargs_confusing_name(self): - def func(self): # 'self' conflicts with the interp-level - return self*7 # argument to call_function() - res = func(self=6) - assert res == 42 - - def test_get(self): - def func(self): return 
self - obj = object() - meth = func.__get__(obj, object) - assert meth() == obj - - def test_call_builtin(self): - s = 'hello' - raises(TypeError, len) - assert len(s) == 5 - raises(TypeError, len, s, s) - raises(TypeError, len, s, s, s) - assert len(*[s]) == 5 - assert len(s, *[]) == 5 - raises(TypeError, len, some_unknown_keyword=s) - raises(TypeError, len, s, some_unknown_keyword=s) - raises(TypeError, len, s, s, some_unknown_keyword=s) - -class AppTestMethod: - def test_get(self): - def func(self): return self - class Object(object): pass - obj = Object() - # Create bound method from function - obj.meth = func.__get__(obj, Object) - assert obj.meth() == obj - # Create bound method from method - meth2 = obj.meth.__get__(obj, Object) - assert meth2() == obj - - def test_get_get(self): - # sanxiyn's test from email - def m(self): return self - class C: pass - class D(C): pass - C.m = m - D.m = C.m - c = C() - assert c.m() == c - d = D() - assert d.m() == d - -class TestMethod: - def setup_method(self, method): - def c(self, bar): - return bar - code = PyCode()._from_code(c.func_code) - self.fn = Function(self.space, code) - - def test_get(self): - space = self.space - w_meth = self.fn.descr_function_get(space.wrap(5), space.type(space.wrap(5))) - meth = space.unwrap(w_meth) - assert isinstance(meth, Method) - - def test_call(self): - space = self.space - w_meth = self.fn.descr_function_get(space.wrap(5), space.type(space.wrap(5))) - meth = space.unwrap(w_meth) - w_result = meth.call_args(Arguments(space, [space.wrap(42)])) - assert space.unwrap(w_result) == 42 - - def test_fail_call(self): - space = self.space - w_meth = self.fn.descr_function_get(space.wrap(5), space.type(space.wrap(5))) - meth = space.unwrap(w_meth) - args = Arguments(space, [space.wrap("spam"), space.wrap("egg")]) - self.space.raises_w(self.space.w_TypeError, meth.call_args, args) - - def test_method_get(self): - space = self.space - # Create some function for this test only - def m(self): 
return self - func = Function(space, PyCode()._from_code(m.func_code)) - # Some shorthands - obj1 = space.wrap(23) - obj2 = space.wrap(42) - args = Arguments(space, []) - # Check method returned from func.__get__() - w_meth1 = func.descr_function_get(obj1, space.type(obj1)) - meth1 = space.unwrap(w_meth1) - assert isinstance(meth1, Method) - assert meth1.call_args(args) == obj1 - # Check method returned from method.__get__() - # --- meth1 is already bound so meth1.__get__(*) is meth1. - w_meth2 = meth1.descr_method_get(obj2, space.type(obj2)) - meth2 = space.unwrap(w_meth2) - assert isinstance(meth2, Method) - assert meth2.call_args(args) == obj1 - # Check method returned from unbound_method.__get__() - w_meth3 = func.descr_function_get(None, space.type(obj2)) - meth3 = space.unwrap(w_meth3) - w_meth4 = meth3.descr_method_get(obj2, space.w_None) - meth4 = space.unwrap(w_meth4) - assert isinstance(meth4, Method) - assert meth4.call_args(args) == obj2 - # Check method returned from unbound_method.__get__() - # --- with an incompatible class - w_meth5 = meth3.descr_method_get(space.wrap('hello'), space.w_None) - assert space.is_true(space.is_(w_meth5, w_meth3)) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py deleted file mode 100644 index d38768b3b0..0000000000 --- a/pypy/interpreter/test/test_gateway.py +++ /dev/null @@ -1,105 +0,0 @@ - -import autopath -from pypy.interpreter import gateway - -class TestBuiltinCode: - def test_signature(self): - def c(space, w_x, w_y, *hello_w): - pass - code = gateway.BuiltinCode(c) - assert code.signature() == (['x', 'y'], 'hello', None) - def d(self, w_boo): - pass - code = gateway.BuiltinCode(d) - assert code.signature() == (['self', 'boo'], None, None) - def e(space, w_x, w_y, __args__): - pass - code = gateway.BuiltinCode(e) - assert code.signature() == (['x', 'y'], 'args', 'keywords') - - def test_call(self): - def c(space, w_x, w_y, *hello_w): - u = space.unwrap - w = space.wrap - 
assert len(hello_w) == 2 - assert u(hello_w[0]) == 0 - assert u(hello_w[1]) == True - return w((u(w_x) - u(w_y) + len(hello_w))) - code = gateway.BuiltinCode(c) - w = self.space.wrap - w_dict = self.space.newdict([ - (w('x'), w(123)), - (w('y'), w(23)), - (w('hello'), self.space.newtuple([w(0), w(True)])), - ]) - w_result = code.exec_code(self.space, w_dict, w_dict) - self.space.eq_w(w_result, w(102)) - - def test_call_args(self): - def c(space, w_x, w_y, __args__): - args_w, kwds_w = __args__.unpack() - u = space.unwrap - w = space.wrap - return w((u(w_x) - u(w_y) + len(args_w)) - * u(kwds_w['boo'])) - code = gateway.BuiltinCode(c) - w = self.space.wrap - w_dict = self.space.newdict([ - (w('x'), w(123)), - (w('y'), w(23)), - (w('args'), self.space.newtuple([w(0), w(True)])), - (w('keywords'), self.space.newdict([(w('boo'), w(10))])), - ]) - w_result = code.exec_code(self.space, w_dict, w_dict) - self.space.eq_w(w_result, w(1020)) - - -class TestGateway: - def test_app2interp(self): - w = self.space.wrap - def app_g3(a, b): - return a+b - g3 = gateway.app2interp_temp(app_g3) - self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar')) - - def test_interp2app(self): - space = self.space - w = space.wrap - def g3(space, w_a, w_b): - return space.add(w_a, w_b) - app_g3 = gateway.interp2app_temp(g3) - w_app_g3 = space.wrap(app_g3) - self.space.eq_w( - space.call(w_app_g3, - space.newtuple([w('foo'), w('bar')]), - space.newdict([])), - w('foobar')) - self.space.eq_w( - space.call_function(w_app_g3, w('foo'), w('bar')), - w('foobar')) - - def test_importall(self): - w = self.space.wrap - g = {} - exec """ -def app_g3(a, b): - return a+b -def app_g1(x): - return g3('foo', x) -""" in g - gateway.importall(g, temporary=True) - g1 = g['g1'] - self.space.eq_w(g1(self.space, w('bar')), w('foobar')) - - def test_exportall(self): - w = self.space.wrap - g = {} - exec """ -def g3(space, w_a, w_b): - return space.add(w_a, w_b) -def app_g1(x): - return g3('foo', x) -""" in g 
- gateway.exportall(g, temporary=True) - g1 = gateway.app2interp_temp(g['app_g1']) - self.space.eq_w(g1(self.space, w('bar')), w('foobar')) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py deleted file mode 100644 index c609d7ed53..0000000000 --- a/pypy/interpreter/test/test_generator.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import generators -import autopath - -class AppTestGenerator: - - def test_generator(self): - def f(): - yield 1 - assert f().next() == 1 - - def test_generator2(self): - def f(): - yield 1 - g = f() - assert g.next() == 1 - raises(StopIteration, g.next) - - def test_generator3(self): - def f(): - yield 1 - g = f() - assert list(g) == [1] - - def test_generator4(self): - def f(): - yield 1 - g = f() - assert [x for x in g] == [1] - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield it.next() - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = me.next() - yield i - me = g() - raises(ValueError, me.next) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py deleted file mode 100644 index dd0426da23..0000000000 --- a/pypy/interpreter/test/test_interpreter.py +++ /dev/null @@ -1,261 +0,0 @@ -import py - -class TestInterpreter: - def codetest(self, source, functionname, args): - """Compile and run the given code string, and then call its function - named by 'functionname' with arguments 'args'.""" - from pypy.interpreter import baseobjspace, executioncontext - from pypy.interpreter import pyframe, gateway, module - space = self.space - - source = str(py.code.Source(source).strip()) + '\n' - - compile = space.builtin.compile - w = space.wrap - w_code = compile(w(source), w('<string>'), w('exec'), w(0), w(0)) - - ec = 
executioncontext.ExecutionContext(space) - - tempmodule = module.Module(space, w("__temp__")) - w_glob = tempmodule.w_dict - space.setitem(w_glob, w("__builtins__"), space.w_builtins) - - code = space.unwrap(w_code) - code.exec_code(space, w_glob, w_glob) - - wrappedargs = [w(a) for a in args] - wrappedfunc = space.getitem(w_glob, w(functionname)) - try: - w_output = space.call_function(wrappedfunc, *wrappedargs) - except baseobjspace.OperationError, e: - #e.print_detailed_traceback(space) - return '<<<%s>>>' % e.errorstr(space) - else: - return space.unwrap(w_output) - - #def setup_class(cls): - # cls.space = testit.objspace() - - #def teardown_class(cls): - # del cls.space - - def test_exception_trivial(self): - x = self.codetest('''\ - def f(): - try: - raise Exception() - except Exception, e: - return 1 - return 2 - ''', 'f', []) - assert x == 1 - - def test_exception(self): - x = self.codetest(''' - def f(): - try: - raise Exception, 1 - except Exception, e: - return e.args[0] - ''', 'f', []) - assert x == 1 - - def test_finally(self): - code = ''' - def f(a): - try: - if a: - raise Exception - a = -12 - finally: - return a - ''' - assert self.codetest(code, 'f', [0]) == -12 - assert self.codetest(code, 'f', [1]) == 1 - -## def test_raise(self): -## x = self.codetest(''' -## def f(): -## raise 1 -## ''', 'f', []) -## self.assertEquals(x, '<<<TypeError: exceptions must be instances or subclasses of Exception or strings (deprecated), not int>>>') - - def test_except2(self): - x = self.codetest(''' - def f(): - try: - z = 0 - try: - "x"+1 - except TypeError, e: - z = 5 - raise e - except TypeError: - return z - ''', 'f', []) - assert x == 5 - - def test_except3(self): - code = ''' - def f(v): - z = 0 - try: - z = 1//v - except ZeroDivisionError, e: - z = "infinite result" - return z - ''' - assert self.codetest(code, 'f', [2]) == 0 - assert self.codetest(code, 'f', [0]) == "infinite result" - ess = "TypeError: unsupported operand type" - res = self.codetest(code, 
'f', ['x']) - assert res.find(ess) >= 0 - # the following (original) test was a bit too strict...: - # self.assertEquals(self.codetest(code, 'f', ['x']), "<<<TypeError: unsupported operand type(s) for //: 'int' and 'str'>>>") - - def test_break(self): - code = ''' - def f(n): - total = 0 - for i in range(n): - try: - if i == 4: - break - finally: - total += i - return total - ''' - assert self.codetest(code, 'f', [4]) == 1+2+3 - assert self.codetest(code, 'f', [9]) == 1+2+3+4 - - def test_continue(self): - code = ''' - def f(n): - total = 0 - for i in range(n): - try: - if i == 4: - continue - finally: - total += 100 - total += i - return total - ''' - assert self.codetest(code, 'f', [4]) == 1+2+3+400 - assert self.codetest(code, 'f', [9]) == ( - 1+2+3 + 5+6+7+8+900) - - def test_import(self): - # Regression test for a bug in PyInterpFrame.IMPORT_NAME: when an - # import statement was executed in a function without a locals dict, a - # plain unwrapped None could be passed into space.call_function causing - # assertion errors later on. 
- real_call_function = self.space.call_function - def safe_call_function(w_obj, *arg_w): - for arg in arg_w: - assert arg is not None - return real_call_function(w_obj, *arg_w) - self.space.call_function = safe_call_function - code = ''' - def f(): - import sys - ''' - self.codetest(code, 'f', []) - - def test_extended_arg(self): - longexpr = 'x = x or ' + '-x' * 2500 - code = ''' - def f(x): - %s - %s - %s - %s - %s - %s - %s - %s - %s - %s - while x: - x -= 1 # EXTENDED_ARG is for the JUMP_ABSOLUTE at the end of the loop - return x - ''' % ((longexpr,)*10) - assert self.codetest(code, 'f', [3]) == 0 - - def test_call_star_starstar(self): - code = '''\ - def f1(n): - return n*2 - def f38(n): - f = f1 - r = [ - f(n, *[]), - f(n), - apply(f, (n,)), - apply(f, [n]), - f(*(n,)), - f(*[n]), - f(n=n), - f(**{'n': n}), - apply(f, (n,), {}), - apply(f, [n], {}), - f(*(n,), **{}), - f(*[n], **{}), - f(n, **{}), - f(n, *[], **{}), - f(n=n, **{}), - f(n=n, *[], **{}), - f(*(n,), **{}), - f(*[n], **{}), - f(*[], **{'n':n}), - ] - return r - ''' - assert self.codetest(code, 'f38', [117]) == [234]*19 - - def test_star_arg(self): - code = ''' - def f(x, *y): - return y - def g(u, v): - return f(u, *v) - ''' - assert self.codetest(code, 'g', [12, ()]) == () - assert self.codetest(code, 'g', [12, (3,4)]) == (3,4) - assert self.codetest(code, 'g', [12, []]) == () - assert self.codetest(code, 'g', [12, [3,4]]) == (3,4) - assert self.codetest(code, 'g', [12, {}]) == () - assert self.codetest(code, 'g', [12, {3:1}]) == (3,) - -class AppTestInterpreter: - def test_trivial(self): - x = 42 - assert x == 42 - - def test_trivial_call(self): - def f(): return 42 - assert f() == 42 - - def test_trivial_call2(self): - def f(): return 1 + 1 - assert f() == 2 - - def test_print(self): - import sys - save = sys.stdout - class Out: - def __init__(self): - self.args = [] - def write(self, *args): - self.args.extend(args) - out = Out() - try: - sys.stdout = out - print 10 - assert out.args == 
['10','\n'] - finally: - sys.stdout = save - - def test_identity(self): - def f(x): return x - assert f(666) == 666 diff --git a/pypy/interpreter/test/test_main.py b/pypy/interpreter/test/test_main.py deleted file mode 100644 index cc3a386f1f..0000000000 --- a/pypy/interpreter/test/test_main.py +++ /dev/null @@ -1,54 +0,0 @@ -import autopath -from cStringIO import StringIO - -import py -from pypy.tool.udir import udir -from pypy.interpreter.baseobjspace import OperationError -from pypy.interpreter import main - -testcode = """\ -def main(): - aStr = 'hello world' - print len(aStr) - -main() -""" - -testresultoutput = '11\n' - -def checkoutput(space, expected_output,f,*args): - w_sys = space.get_builtin_module("sys") - w_oldout = space.getattr(w_sys, space.wrap("stdout")) - capturefn = udir.join('capturefile') - capturefile = capturefn.open('w') - space.setattr(w_sys, space.wrap("stdout"), space.wrap(capturefile)) - try: - f(*(args + (space,))) - finally: - space.setattr(w_sys, space.wrap("stdout"), w_oldout) - capturefile.close() - assert capturefn.read(mode='rU') == expected_output - - -testfn = 'tmp_hello_world.py' - -class TestMain: - def setup_class(cls): - ofile = open(testfn, 'w') - ofile.write(testcode) - ofile.close() - - def teardown_class(cls): - import os - os.remove(testfn) - - def test_run_file(self): - checkoutput(self.space, testresultoutput,main.run_file,testfn) - - def test_run_string(self): - checkoutput(self.space, testresultoutput, - main.run_string,testcode,testfn) - - def test_eval_string(self): - w_x = main.eval_string('2+2', space=self.space) - self.space.eq_w(w_x, self.space.wrap(4)) diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py deleted file mode 100644 index c827083182..0000000000 --- a/pypy/interpreter/test/test_module.py +++ /dev/null @@ -1,39 +0,0 @@ - -import autopath -from pypy.interpreter.module import Module - -class TestModule: - def setup_class(cls): - cls.m = Module(cls.space, 
cls.space.wrap('m')) - def teardown_class(cls): - del cls.m - - def test_name(self): - w = self.space.wrap - w_m = w(self.m) - self.space.eq_w(self.space.getattr(w_m, w('__name__')), w('m')) - - def test_attr(self): - w = self.space.wrap - w_m = w(self.m) - self.space.setattr(w_m, w('x'), w(15)) - self.space.eq_w(self.space.getattr(w_m, w('x')), w(15)) - self.space.delattr(w_m, w('x')) - self.space.raises_w(self.space.w_AttributeError, - self.space.delattr, w_m, w('x')) - -class AppTest_ModuleObject: - def test_attr(self): - m = __import__('__builtin__') - m.x = 15 - assert m.x == 15 - assert getattr(m, 'x') == 15 - setattr(m, 'x', 23) - assert m.x == 23 - assert getattr(m, 'x') == 23 - del m.x - raises(AttributeError, getattr, m, 'x') - m.x = 15 - delattr(m, 'x') - raises(AttributeError, getattr, m, 'x') - raises(AttributeError, delattr, m, 'x') diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py deleted file mode 100644 index c1ee89a104..0000000000 --- a/pypy/interpreter/test/test_nestedscope.py +++ /dev/null @@ -1,57 +0,0 @@ -import autopath - - -class AppTestNestedScope: - - def test_nested_scope(self): - x = 42 - def f(): return x - assert f() == 42 - - def test_nested_scope2(self): - x = 42 - y = 3 - def f(): return x - assert f() == 42 - - def test_nested_scope3(self): - x = 42 - def f(): - def g(): - return x - return g - assert f()() == 42 - - def test_nested_scope4(self): - def f(): - x = 3 - def g(): - return x - a = g() - x = 4 - b = g() - return (a, b) - assert f() == (3, 4) - - def test_nested_scope_locals(self): - def f(): - x = 3 - def g(): - i = x - return locals() - return g() - d = f() - assert d == {'i':3} - - def test_deeply_nested_scope_locals(self): - def f(): - x = 3 - def g(): - def h(): - i = x - return locals() - return locals(), h() - return g() - outer_locals, inner_locals = f() - assert inner_locals == {'i':3} - assert len(outer_locals) == 1, "len!=1 for %r" % (outer_locals,) diff --git 
a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py deleted file mode 100644 index cdbdd0c079..0000000000 --- a/pypy/interpreter/test/test_objspace.py +++ /dev/null @@ -1,105 +0,0 @@ -import autopath -from py.test import raises - -# this test isn't so much to test that the objspace interface *works* -# -- it's more to test that it's *there* - -class TestObjSpace: - def test_newstring(self): - w = self.space.wrap - s = 'abc' - chars_w = [w(ord(c)) for c in s] - self.space.eq_w(w(s), self.space.newstring(chars_w)) - - def test_newstring_fail(self): - w = self.space.wrap - s = 'abc' - not_chars_w = [w(c) for c in s] - self.space.raises_w(self.space.w_TypeError, - self.space.newstring, - not_chars_w) - self.space.raises_w(self.space.w_ValueError, - self.space.newstring, - [w(-1)]) - - def test_newlist(self): - w = self.space.wrap - l = range(10) - w_l = self.space.newlist([w(i) for i in l]) - self.space.eq_w(w_l, w(l)) - - def test_newdict(self): - w = self.space.wrap - items = [(0, 1), (3, 4)] - items_w = [(w(k), w(v)) for (k, v) in items] - d = dict(items) - w_d = self.space.newdict(items_w) - self.space.eq_w(w_d, w(d)) - - def test_newtuple(self): - w = self.space.wrap - t = tuple(range(10)) - w_t = self.space.newtuple([w(i) for i in t]) - self.space.eq_w(w_t, w(t)) - - def test_is_true(self): - w = self.space.wrap - true = (1==1) - false = (1==0) - w_true = w(true) - w_false = w(false) - assert self.space.is_true(w_true) - assert not self.space.is_true(w_false) - - def test_is_(self): - w_l = self.space.newlist([]) - w_m = self.space.newlist([]) - assert self.space.is_(w_l, w_l) == self.space.w_True - assert self.space.is_(w_l, w_m) == self.space.w_False - - def test_newbool(self): - assert self.space.newbool(0) == self.space.w_False - assert self.space.newbool(1) == self.space.w_True - - def test_unpackiterable(self): - w = self.space.wrap - l = [w(1), w(2), w(3), w(4)] - w_l = self.space.newlist(l) - assert 
self.space.unpackiterable(w_l) == l - assert self.space.unpackiterable(w_l, 4) == l - raises(ValueError, self.space.unpackiterable, w_l, 3) - raises(ValueError, self.space.unpackiterable, w_l, 5) - - def test_unpacktuple(self): - w = self.space.wrap - l = [w(1), w(2), w(3), w(4)] - w_l = self.space.newtuple(l) - assert self.space.unpacktuple(w_l) == l - assert self.space.unpacktuple(w_l, 4) == l - raises(ValueError, self.space.unpacktuple, w_l, 3) - raises(ValueError, self.space.unpacktuple, w_l, 5) - - def test_exception_match(self): - assert self.space.exception_match(self.space.w_ValueError, - self.space.w_ValueError) - assert self.space.exception_match(self.space.w_IndexError, - self.space.w_LookupError) - assert not self.space.exception_match(self.space.w_ValueError, - self.space.w_LookupError) - -class TestModuleMinimal: - def test_sys_exists(self): - w_sys = self.space.get_builtin_module('sys') - assert self.space.is_true(w_sys) - - def test_import_exists(self): - space = self.space - w_builtin = space.get_builtin_module('__builtin__') - assert space.is_true(w_builtin) - w_name = space.wrap('__import__') - w_import = self.space.getattr(w_builtin, w_name) - assert space.is_true(w_import) - - def test_sys_import(self): - from pypy.interpreter.main import run_string - run_string('import sys', space=self.space) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py deleted file mode 100644 index 9321a04727..0000000000 --- a/pypy/interpreter/test/test_pyframe.py +++ /dev/null @@ -1,39 +0,0 @@ -import autopath - - -class AppTestPyFrame: - - # test for the presence of the attributes, not functionality - - def test_f_locals(self): - import sys - f = sys._getframe() - assert f.f_locals is locals() - - def test_f_globals(self): - import sys - f = sys._getframe() - assert f.f_globals is globals() - - def test_f_builtins(self): - import sys, __builtin__ - f = sys._getframe() - assert f.f_builtins is __builtin__.__dict__ - - def 
test_f_code(self): - def g(): - import sys - f = sys._getframe() - return f.f_code - assert g() is g.func_code - - def test_f_lineno(self): - def g(): - import sys - f = sys._getframe() - x = f.f_lineno - y = f.f_lineno - z = f.f_lineno - return [x, y, z] - origin = g.func_code.co_firstlineno - assert g() == [origin+3, origin+4, origin+5] diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py deleted file mode 100644 index b1775f6ab2..0000000000 --- a/pypy/interpreter/test/test_raise.py +++ /dev/null @@ -1,125 +0,0 @@ -import autopath - - -class AppTestRaise: - def test_control_flow(self): - try: - raise Exception - raise AssertionError, "exception failed to raise" - except: - pass - else: - raise AssertionError, "exception executing else clause!" - - def test_1arg(self): - try: - raise SystemError, 1 - except Exception, e: - assert e.args[0] == 1 - - def test_2args(self): - try: - raise SystemError, (1, 2) - except Exception, e: - assert e.args[0] == 1 - assert e.args[1] == 2 - - def test_instancearg(self): - try: - raise SystemError, SystemError(1, 2) - except Exception, e: - assert e.args[0] == 1 - assert e.args[1] == 2 - - def test_more_precise_instancearg(self): - try: - raise Exception, SystemError(1, 2) - except SystemError, e: - assert e.args[0] == 1 - assert e.args[1] == 2 - - def test_stringexc(self): - a = "hello world" - try: - raise a - except a, e: - assert e == None - try: - raise a, "message" - except a, e: - assert e == "message" - - def test_builtin_exc(self): - try: - [][0] - except IndexError, e: - assert isinstance(e, IndexError) - - def test_raise_cls(self): - def f(): - raise IndexError - raises(IndexError, f) - - def test_raise_cls_catch(self): - def f(r): - try: - raise r - except LookupError: - return 1 - raises(Exception, f, Exception) - assert f(IndexError) == 1 - - def test_raise_wrong(self): - try: - raise 1 - except TypeError: - pass - else: - raise AssertionError, "shouldn't be able to raise 1" - - def 
test_raise_three_args(self): - import sys - try: - raise ValueError - except: - exc_type,exc_val,exc_tb = sys.exc_info() - try: - raise exc_type,exc_val,exc_tb - except: - exc_type2,exc_val2,exc_tb2 = sys.exc_info() - assert exc_type ==exc_type2 - assert exc_val ==exc_val2 - assert exc_tb ==exc_tb2 - - def test_tuple_type(self): - def f(): - raise ((StopIteration, 123), 456, 789) - raises(StopIteration, f) - - def test_userclass(self): - class A: - def __init__(self, x=None): - self.x = x - class B(A): - pass - try: - raise A - except A, a: - assert a.x == None - try: - raise A(42) - except A, a: - assert a.x == 42 - try: - raise A, 42 - except A, a: - assert a.x == 42 - try: - raise B - except A, b: - assert type(b) == B - try: - raise A, B(42) - except B, b: - assert type(b) == B - assert b.x == 42 diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py deleted file mode 100644 index 4df022dbd6..0000000000 --- a/pypy/interpreter/test/test_special.py +++ /dev/null @@ -1,13 +0,0 @@ - -import autopath - -class AppTestSpecialTestCase: - def test_Ellipsis(self): - assert Ellipsis == Ellipsis - assert repr(Ellipsis) == 'Ellipsis' - - def test_NotImplemented(self): - def f(): - return NotImplemented - assert f() == NotImplemented - assert repr(NotImplemented) == 'NotImplemented' diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py deleted file mode 100644 index be4835f896..0000000000 --- a/pypy/interpreter/test/test_typedef.py +++ /dev/null @@ -1,25 +0,0 @@ -import autopath - -# this test isn't so much to test that the objspace interface *works* -# -- it's more to test that it's *there* - -class AppTestTraceBackAttributes: - - def test_newstring(self): - import sys - def f(): - raise TypeError, "hello" - - def g(): - f() - - try: - g() - except: - typ,val,tb = sys.exc_info() - else: - raise AssertionError, "should have raised" - assert hasattr(tb, 'tb_frame') - assert hasattr(tb, 'tb_lasti') - 
assert hasattr(tb, 'tb_lineno') - assert hasattr(tb, 'tb_next') diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py deleted file mode 100644 index f1bef5d350..0000000000 --- a/pypy/interpreter/typedef.py +++ /dev/null @@ -1,282 +0,0 @@ -""" - - -""" -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import OperationError -from pypy.tool.cache import Cache -import new - -class TypeDef: - def __init__(self, __name, __base=None, **rawdict): - "NOT_RPYTHON: initialization-time only" - self.name = __name - self.base = __base - self.hasdict = '__dict__' in rawdict or (__base and __base.hasdict) - self.rawdict = rawdict - - -unique_interplevel_subclass_cache = Cache() -def get_unique_interplevel_subclass(cls): - return unique_interplevel_subclass_cache.getorbuild(cls, _buildusercls, None) - -def _buildusercls(cls, ignored): - "NOT_RPYTHON: initialization-time only" - typedef = cls.typedef - name = 'User' + cls.__name__ - body = {} - - class User_InsertNameHere(object): - - def getclass(self, space): - return self.w__class__ - - def setclass(self, w_subtype): - # XXX sanity checks here - self.w__class__ = w_subtype - - if typedef.hasdict: - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - - else: - def getdict(self): - return self.w__dict__ - - def setdict(self, w_dict): - space = self.space - if not space.is_true(space.isinstance(w_dict, space.w_dict)): - raise OperationError(space.w_TypeError, - space.wrap("setting dictionary to a non-dict")) - self.w__dict__ = w_dict - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.w__dict__ = space.newdict([]) - - body = dict([(key, value) - for key, value in User_InsertNameHere.__dict__.items() - if not key.startswith('_')]) - subcls = type(name, (cls,), body) - return subcls - -def instantiate(cls): - "Create an empty instance of 'cls'." 
- if isinstance(cls, type): - return object.__new__(cls) - else: - return new.instance(cls) - -class GetSetProperty(Wrappable): - def __init__(self, fget, fset=None, fdel=None, doc=None): - "NOT_RPYTHON: initialization-time only" - fget = getattr(fget, 'im_func', fget) - fset = getattr(fset, 'im_func', fset) - fdel = getattr(fdel, 'im_func', fdel) - self.fget = fget - self.fset = fset - self.fdel = fdel - self.doc = doc - - def descr_property_get(space, w_property, w_obj, w_cls=None): - # XXX HAAAAAAAAAAAACK (but possibly a good one) - if w_obj == space.w_None and not space.is_true(space.is_(w_cls, space.type(space.w_None))): - #print w_property, w_obj, w_cls - return w_property - else: - return space.unwrap(w_property).fget(space, w_obj) - - def descr_property_set(space, w_property, w_obj, w_value): - fset = space.unwrap(w_property).fset - if fset is None: - raise OperationError(space.w_AttributeError, - space.wrap("read-only attribute")) - fset(space, w_obj, w_value) - - def descr_property_del(space, w_property, w_obj): - fdel = space.unwrap(w_property).fdel - if fdel is None: - raise OperationError(space.w_AttributeError, - space.wrap("cannot delete attribute")) - fdel(space, w_obj) - - typedef = TypeDef("GetSetProperty", - __get__ = interp2app(descr_property_get), - __set__ = interp2app(descr_property_set), - __delete__ = interp2app(descr_property_del), - ) - -def attrproperty(name): - "NOT_RPYTHON: initialization-time only" - def fget(space, w_obj): - obj = space.unwrap_builtin(w_obj) - return space.wrap(getattr(obj, name)) - return GetSetProperty(fget) - -def attrproperty_w(name): - "NOT_RPYTHON: initialization-time only" - def fget(space, w_obj): - obj = space.unwrap_builtin(w_obj) - w_value = getattr(obj, name) - if w_value is None: - return space.w_None - else: - return w_value - - return GetSetProperty(fget) - -# ____________________________________________________________ -# -# Definition of the type's descriptors for all the internal types - -from 
pypy.interpreter.eval import Code, Frame -from pypy.interpreter.pycode import PyCode, CO_VARARGS, CO_VARKEYWORDS -from pypy.interpreter.pyframe import PyFrame, ControlFlowException -from pypy.interpreter.module import Module -from pypy.interpreter.function import Function, Method, StaticMethod -from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator -from pypy.interpreter.nestedscope import Cell -from pypy.interpreter.special import NotImplemented, Ellipsis - -def descr_get_dict(space, w_obj): - obj = space.unwrap_builtin(w_obj) - w_dict = obj.getdict() - assert w_dict is not None, repr(obj) - return w_dict - -def descr_set_dict(space, w_obj, w_dict): - obj = space.unwrap_builtin(w_obj) - obj.setdict(w_dict) - -default_dict_descr = GetSetProperty(descr_get_dict, descr_set_dict) - - -# co_xxx interface emulation for built-in code objects -def fget_co_varnames(space, w_code): - code = space.unwrap_builtin(w_code) - return space.newtuple([space.wrap(name) for name in code.getvarnames()]) - -def fget_co_argcount(space, w_code): - code = space.unwrap_builtin(w_code) - argnames, varargname, kwargname = code.signature() - return space.wrap(len(argnames)) - -def fget_co_flags(space, w_code): - code = space.unwrap_builtin(w_code) - argnames, varargname, kwargname = code.signature() - flags = 0 - if varargname is not None: flags |= CO_VARARGS - if kwargname is not None: flags |= CO_VARKEYWORDS - return space.wrap(flags) - -def fget_co_consts(space, w_code): - code = space.unwrap_builtin(w_code) - w_docstring = space.wrap(code.getdocstring()) - return space.newtuple([w_docstring]) - -Code.typedef = TypeDef('internal-code', - co_name = attrproperty('co_name'), - co_varnames = GetSetProperty(fget_co_varnames), - co_argcount = GetSetProperty(fget_co_argcount), - co_flags = GetSetProperty(fget_co_flags), - co_consts = GetSetProperty(fget_co_consts), - ) - -Frame.typedef = TypeDef('internal-frame', - f_code = 
attrproperty('code'), - f_locals = GetSetProperty(Frame.fget_getdictscope.im_func), - f_globals = attrproperty_w('w_globals'), - ) - -PyCode.typedef = TypeDef('code', - __new__ = interp2app(PyCode.descr_code__new__.im_func), - co_argcount = attrproperty('co_argcount'), - co_nlocals = attrproperty('co_nlocals'), - co_stacksize = attrproperty('co_stacksize'), - co_flags = attrproperty('co_flags'), - co_code = attrproperty('co_code'), - co_consts = attrproperty('co_consts'), - co_names = attrproperty('co_names'), - co_varnames = attrproperty('co_varnames'), - co_freevars = attrproperty('co_freevars'), - co_cellvars = attrproperty('co_cellvars'), - co_filename = attrproperty('co_filename'), - co_name = attrproperty('co_name'), - co_firstlineno = attrproperty('co_firstlineno'), - co_lnotab = attrproperty('co_lnotab'), - ) - -PyFrame.typedef = TypeDef('frame', - f_builtins = attrproperty_w('w_builtins'), - f_lineno = GetSetProperty(PyFrame.fget_f_lineno.im_func), - **Frame.typedef.rawdict) - -Module.typedef = TypeDef("module", - __new__ = interp2app(Module.descr_module__new__.im_func), - __init__ = interp2app(Module.descr_module__init__.im_func), - __dict__ = default_dict_descr, - ) - -getset_func_doc = GetSetProperty(Function.fget_func_doc, - Function.fset_func_doc, - Function.fdel_func_doc) - -Function.typedef = TypeDef("function", - __call__ = interp2app(Function.descr_function_call.im_func), - __get__ = interp2app(Function.descr_function_get.im_func), - func_code = attrproperty('code'), - func_doc = getset_func_doc, - func_name = attrproperty('name'), - func_dict = attrproperty_w('w_func_dict'), - func_defaults = GetSetProperty(Function.fget_func_defaults), - func_globals = attrproperty_w('w_func_globals'), - __doc__ = getset_func_doc, - __name__ = attrproperty('name'), - __dict__ = default_dict_descr, - # XXX func_closure, etc.pp - ) - -Method.typedef = TypeDef("method", - __call__ = interp2app(Method.descr_method_call.im_func), - __get__ = 
interp2app(Method.descr_method_get.im_func), - im_func = attrproperty_w('w_function'), - im_self = attrproperty_w('w_instance'), - im_class = attrproperty_w('w_class'), - __getattribute__ = interp2app(Method.descr_method_getattribute.im_func), - # XXX getattribute/setattribute etc.pp - ) - -StaticMethod.typedef = TypeDef("staticmethod", - __get__ = interp2app(StaticMethod.descr_staticmethod_get.im_func), - # XXX getattribute etc.pp - ) - -PyTraceback.typedef = TypeDef("traceback", - tb_frame = attrproperty('frame'), - tb_lasti = attrproperty('lasti'), - tb_lineno = attrproperty('lineno'), - tb_next = attrproperty('next'), - ) - -GeneratorIterator.typedef = TypeDef("generator", - next = interp2app(GeneratorIterator.descr_next.im_func), - __iter__ = interp2app(GeneratorIterator.descr__iter__.im_func), - gi_running = attrproperty('running'), - gi_frame = attrproperty('frame'), -) - -Cell.typedef = TypeDef("Cell") - -Ellipsis.typedef = TypeDef("Ellipsis", - __repr__ = interp2app(Ellipsis.descr__repr__.im_func), -) - -NotImplemented.typedef = TypeDef("NotImplemented", - __repr__ = interp2app(NotImplemented.descr__repr__.im_func), -) - -ControlFlowException.typedef = TypeDef("ControlFlowException") diff --git a/pypy/module/__builtin__interp.py b/pypy/module/__builtin__interp.py deleted file mode 100644 index eeb6906c23..0000000000 --- a/pypy/module/__builtin__interp.py +++ /dev/null @@ -1,309 +0,0 @@ -""" -Implementation of interpreter-level builtins. 
-""" -from pypy.interpreter.module import Module -from pypy.interpreter.pycode import PyCode -from pypy.interpreter.error import OperationError -_noarg = object() - -import __builtin__ as cpy_builtin - -# import useful app-level functions -from __applevel__ import execfile, callable, _iter_generator - - -def _actframe(position=0): - return space.getexecutioncontext().framestack.top(position) - -def globals(): - return _actframe().w_globals - -def locals(): - return _actframe().getdictscope() - -def _caller_globals(w_index=None): - position = space.unwrapdefault(w_index, 1) - return _actframe(position).w_globals - -def _caller_locals(w_index=None): - position = space.unwrapdefault(w_index, 1) - return _actframe(position).getdictscope() - - -def try_import_mod(w_modulename, f, w_parent, w_name, pkgdir=None): - import os - w = space.wrap - if os.path.exists(f): - w_mod = space.wrap(Module(space, w_modulename)) - space.sys.setmodule(w_mod) - space.setattr(w_mod, w('__file__'), w(f)) - if pkgdir is not None: - space.setattr(w_mod, w('__path__'), space.newlist([w(pkgdir)])) - w_dict = space.getattr(w_mod, w('__dict__')) - execfile(w(f), w_dict, w_dict) - w_mod = check_sys_modules(w_modulename) - if w_mod is not None and w_parent is not None: - space.setattr(w_parent, w_name, w_mod) - return w_mod - else: - return None - -def try_getattr(w_obj,w_name): - try: - return space.getattr(w_obj, w_name) - except OperationError, e: - # ugh, but blame CPython :-/ this is supposed to emulate - # hasattr, which eats all exceptions. 
- return None - -def try_getitem(w_obj,w_key): - try: - return space.getitem(w_obj, w_key) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - return None - - -def check_sys_modules(w_modulename): - try: - w_mod = space.getitem(space.sys.w_modules, w_modulename) - except OperationError, e: - pass - else: - return w_mod - if not e.match(space, space.w_KeyError): - raise - return None - -il_len = len - -def __import__(w_modulename, w_globals=None, - w_locals=None, w_fromlist=None): - modulename = space.unwrap(w_modulename) - if not isinstance(modulename, str): - try: - helper = ', not ' + modulename.__class__.__name__ - except AttributeError: - helper = '' - raise OperationError(space.w_TypeError, - space.wrap("__import__() argument 1 must be string" + helper)) - w = space.wrap - - if w_globals is not None and not space.is_true(space.is_(w_globals, space.w_None)): - ctxt_w_name = try_getitem(w_globals,w('__name__')) - ctxt_w_path = try_getitem(w_globals,w('__path__')) - else: - ctxt_w_name = None - ctxt_w_path = None - - rel_modulename = None - if ctxt_w_name is not None: - - ctxt_name_prefix_parts = space.unwrap(ctxt_w_name).split('.') - if ctxt_w_path is None: # context is a plain module - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:-1] - if ctxt_name_prefix_parts: - rel_modulename = '.'.join(ctxt_name_prefix_parts+[modulename]) - else: # context is a package module - rel_modulename = space.unwrap(ctxt_w_name)+'.'+modulename - if rel_modulename is not None: - w_mod = check_sys_modules(w(rel_modulename)) - if (w_mod is None or - not space.is_true(space.is_(w_mod,space.w_None))): - - w_mod = absolute_import(rel_modulename, - il_len(ctxt_name_prefix_parts), - w_fromlist, tentative=1) - if w_mod is not None: - return w_mod - else: - rel_modulename = None - - w_mod = absolute_import(modulename,0,w_fromlist, tentative=0) - if rel_modulename is not None: - space.setitem(space.sys.w_modules, w(rel_modulename),space.w_None) - return w_mod - 
-def absolute_import(modulename, baselevel, w_fromlist, tentative): - w = space.wrap - - w_mod = None - parts = modulename.split('.') - prefix = [] - # it would be nice if we could do here: w_path = space.sys.w_path - # instead: - w_path = space.getitem(space.sys.w_dict, space.wrap('path')) - - first = None - level = 0 - - for part in parts: - w_mod = load_part(w_path, prefix, part, w_mod, tentative=tentative) - if w_mod is None: - return None - - if baselevel == level: - first = w_mod - tentative = 0 - prefix.append(part) - w_path = try_getattr(w_mod,w('__path__')) - level += 1 - - if w_fromlist is not None and space.is_true(w_fromlist): - if w_path is not None: - for w_name in space.unpackiterable(w_fromlist): - load_part(w_path, prefix, space.unwrap(w_name), w_mod, - tentative=1) - return w_mod - else: - return first - -def load_part(w_path, prefix, partname, w_parent, tentative): - w = space.wrap - modulename = '.'.join(prefix+[partname]) - w_modulename = w(modulename) - w_mod = check_sys_modules(w_modulename) - if w_mod is not None: - if not space.is_true(space.is_(w_mod,space.w_None)): - return w_mod - else: - w_mod = space.get_builtin_module(modulename) - if w_mod is not None: - return w_mod - import os - for path in space.unpackiterable(w_path): - dir = os.path.join(space.unwrap(path), partname) - if os.path.isdir(dir): - f = os.path.join(dir,'__init__.py') - w_mod = try_import_mod(w_modulename, f, w_parent, w(partname), - pkgdir=dir) - if w_mod is not None: - return w_mod - f = os.path.join(space.unwrap(path), partname + '.py') - w_mod = try_import_mod(w_modulename, f, w_parent, w(partname)) - if w_mod is not None: - return w_mod - - if tentative: - return None - else: - # ImportError - w_failing = w_modulename - w_exc = space.call_function(space.w_ImportError, w_failing) - raise OperationError(space.w_ImportError, w_exc) - - -def compile(w_str, w_filename, w_startstr, - w_supplied_flags=None, w_dont_inherit=None): - str_ = space.unwrap(w_str) - filename = 
space.unwrap(w_filename) - startstr = space.unwrap(w_startstr) - supplied_flags = space.unwrapdefault(w_supplied_flags, 0) - dont_inherit = space.unwrapdefault(w_dont_inherit, 0) - - #print (str_, filename, startstr, supplied_flags, dont_inherit) - # XXX we additionally allow GENERATORS because compiling some builtins - # requires it. doesn't feel quite right to do that here. - try: - c = cpy_builtin.compile(str_, filename, startstr, supplied_flags|4096, dont_inherit) - # It would be nice to propagate all exceptions to app level, - # but here we only propagate the 'usual' ones, until we figure - # out how to do it generically. - except SyntaxError,e: - raise OperationError(space.w_SyntaxError,space.wrap(str(e))) - except ValueError,e: - raise OperationError(space.w_ValueError,space.wrap(str(e))) - except TypeError,e: - raise OperationError(space.w_TypeError,space.wrap(str(e))) - return space.wrap(PyCode()._from_code(c)) - -def eval(w_source, w_globals=None, w_locals=None): - w = space.wrap - - if space.is_true(space.isinstance(w_source, space.w_str)): - w_codeobj = compile(w_source, w("<string>"), w("eval")) - elif isinstance(space.unwrap(w_source), PyCode): - w_codeobj = w_source - else: - raise OperationError(space.w_TypeError, - w('eval() arg 1 must be a string or code object')) - - if w_globals is None: - w_globals = globals() - w_locals = locals() - elif w_locals is None: - w_locals = w_globals - - return space.unwrap(w_codeobj).exec_code(space, w_globals, w_locals) - -def abs(w_val): - "abs(number) -> number\n\nReturn the absolute value of the argument." 
- return space.abs(w_val) - -def chr(w_ascii): - w_character = space.newstring([w_ascii]) - return w_character - -def len(w_obj): - return space.len(w_obj) - -def delattr(w_object, w_name): - space.delattr(w_object, w_name) - return space.w_None - -def getattr(w_object, w_name, w_defvalue = _noarg): - try: - return space.getattr(w_object, w_name) - except OperationError, e: - if e.match(space, space.w_AttributeError): - if w_defvalue is not _noarg: - return w_defvalue - raise - -def hash(w_object): - return space.hash(w_object) - -def oct(w_val): - # XXX does this need to be a space operation? - return space.oct(w_val) - -def hex(w_val): - return space.hex(w_val) - -def round(w_val, w_n=None): - if w_n is None: - w_n = space.wrap(0) - return space.round(w_val, w_n) - -def id(w_object): - return space.id(w_object) - -#XXX works only for new-style classes. -#So we have to fix it, when we add support for old-style classes -def _issubtype(w_cls1, w_cls2): - return space.issubtype(w_cls1, w_cls2) - -def iter(w_collection_or_callable, w_sentinel = _noarg): - if w_sentinel is _noarg: - return space.iter(w_collection_or_callable) - else: - if not space.is_true(callable(w_collection_or_callable)): - raise OperationError(space.w_TypeError, - space.wrap('iter(v, w): v must be callable')) - return _iter_generator(w_collection_or_callable, w_sentinel) - -def ord(w_val): - return space.ord(w_val) - -def pow(w_base, w_exponent, w_modulus=None): - if w_modulus is None: - w_modulus = space.w_None - return space.pow(w_base, w_exponent, w_modulus) - -def repr(w_object): - return space.repr(w_object) - -def setattr(w_object, w_name, w_val): - space.setattr(w_object, w_name, w_val) - return space.w_None diff --git a/pypy/module/__builtin__module.py b/pypy/module/__builtin__module.py deleted file mode 100644 index 1cb5b688db..0000000000 --- a/pypy/module/__builtin__module.py +++ /dev/null @@ -1,883 +0,0 @@ -"""Built-in functions, exceptions, and other objects. 
- -Noteworthy: None is the `nil' object; Ellipsis represents `...' in slices. -""" - -__builtins__['None'] = __interplevel__eval('space.w_None') -__builtins__['False'] = __interplevel__eval('space.w_False') -__builtins__['True'] = __interplevel__eval('space.w_True') -__builtins__['type'] = __interplevel__eval('space.w_type') -__builtins__['__debug__'] = True - -object = __interplevel__eval('space.w_object') -# XXX these are faked: -unicode = __interplevel__eval('space.wrap(unicode)') -file = __interplevel__eval('space.wrap(file)') -open = file - -# TODO Fix this later to show Ctrl-D on Unix -quit = exit = "Use Ctrl-Z (i.e. EOF) to exit." - -def execfile(filename, glob=None, loc=None): - if glob is None: - glob = _caller_globals() - if loc is None: - loc = _caller_locals() - elif loc is None: - loc = glob - f = file(filename) - try: - source = f.read() - finally: - f.close() - #Don't exec the source directly, as this loses the filename info - co = compile(source, filename, 'exec') - exec co in glob, loc - -def raw_input(prompt=None): - import sys - if prompt is not None: - sys.stdout.write(prompt) - sys.stdout.flush() - line = sys.stdin.readline() - if not line: # inputting an empty line gives line == '\n' - raise EOFError - return line - -def input(): - return eval(raw_input()) - - -def sum(sequence, total=0): - # must forbid "summing" strings, per specs of built-in 'sum' - if isinstance(total, str): raise TypeError - for item in sequence: - total = total + item - return total - -def _iter_generator(callable_, sentinel): - """ This generator implements the __iter__(callable,sentinel) protocol """ - while 1: - result = callable_() - if result == sentinel: - return - yield result - -def enumerate(collection): - 'Generates an indexed series: (0,coll[0]), (1,coll[1]) ...' 
- i = 0 - it = iter(collection) - while 1: - yield (i, it.next()) - i += 1 - -def apply(function, args, kwds={}): - """call a function (or other callable object) and return its result""" - return function(*args, **kwds) - -def map(function, *collections): - """does 3 separate things, hence this enormous docstring. - 1. if function is None, return a list of tuples, each with one - item from each collection. If the collections have different - lengths, shorter ones are padded with None. - - 2. if function is not None, and there is only one collection, - apply function to every item in the collection and return a - list of the results. - - 3. if function is not None, and there are several collections, - repeatedly call the function with one argument from each - collection. If the collections have different lengths, - shorter ones are padded with None - """ - - if len(collections) == 0: - raise TypeError, "map() requires at least one sequence" - - elif len(collections) == 1: - #it's the most common case, so make it faster - if function is None: - return list(collections[0]) - else: - return [function(x) for x in collections[0]] - else: - res = [] - idx = 0 - while 1: - cont = 0 #is any collection not empty? - args = [] - for collection in collections: - try: - elem = collection[idx] - cont = cont + 1 - except IndexError: - elem = None - args.append(elem) - if cont: - if function is None: - res.append(tuple(args)) - else: - res.append(function(*args)) - else: - return res - idx = idx + 1 - -def filter(function, collection): - """construct a list of those elements of collection for which function - is True. 
If function is None, then return the items in the sequence - which are True.""" - - if function is None: - res = [item for item in collection if item] - else: - res = [item for item in collection if function(item)] - - if type(collection) is tuple: - return tuple(res) - elif type(collection) is str: - return "".join(res) - else: - return res - -def zip(*collections): - """return a list of tuples, where the nth tuple contains every - nth item of each collection. If the collections have different - lengths, zip returns a list as long as the shortest collection, - ignoring the trailing items in the other collections.""" - - if len(collections) == 0: - raise TypeError, "zip() requires at least one sequence" - res = [] - idx = 0 - while 1: - try: - elems = [] - for collection in collections: - elems.append(collection[idx]) - res.append(tuple(elems)) - except IndexError: - break - idx = idx + 1 - return res - -def reduce(function, l, *initialt): - """ Apply function of two arguments cumulatively to the items of - sequence, from left to right, so as to reduce the sequence to a - single value. 
Optionally begin with an initial value.""" - - if initialt: - initial, = initialt - idx = 0 - else: - try: - initial = l[0] - except IndexError: - raise TypeError, "reduce() of empty sequence with no initial value" - idx = 1 - while 1: - try: - initial = function(initial, l[idx]) - idx = idx + 1 - except IndexError: - break - return initial - -def issubclass(cls, klass_or_tuple): - if _issubtype(type(klass_or_tuple), tuple): - for klass in klass_or_tuple: - if issubclass(cls, klass): - return True - return False - try: - return _issubtype(cls, klass_or_tuple) - except TypeError: - raise TypeError, "arg 2 must be a class or type or a tuple thereof" - -def isinstance(obj, klass_or_tuple): - if issubclass(type(obj), klass_or_tuple): - return True - try: - objcls = obj.__class__ - except AttributeError: - return False - else: - return objcls is not type(obj) and issubclass(objcls, klass_or_tuple) - -def range(x, y=None, step=1): - """ returns a list of integers in arithmetic position from start (defaults - to zero) to stop - 1 by step (defaults to 1). Use a negative step to - get a list in decending order.""" - - if y is None: - start = 0 - stop = x - else: - start = x - stop = y - - if step == 0: - raise ValueError, 'range() arg 3 must not be zero' - - elif step > 0: - if stop <= start: # no work for us - return [] - howmany = (stop - start + step - 1)/step - - else: # step must be < 0, or we would have raised ValueError - if stop >= start: # no work for us - return [] - howmany = (start - stop - step - 1)/-step - - arr = [None] * howmany # this is to avoid using append. - - i = start - n = 0 - while n < howmany: - arr[n] = i - i += step - n += 1 - - return arr - -# min and max could be one function if we had operator.__gt__ and -# operator.__lt__ Perhaps later when we have operator. 
- -def min(*arr): - """return the smallest number in a list""" - - if not arr: - raise TypeError, 'min() takes at least one argument' - - if len(arr) == 1: - arr = arr[0] - - iterator = iter(arr) - try: - min = iterator.next() - except StopIteration: - raise ValueError, 'min() arg is an empty sequence' - - for i in iterator: - if min > i: - min = i - return min - -def max(*arr): - """return the largest number in a list""" - - if not arr: - raise TypeError, 'max() takes at least one argument' - - if len(arr) == 1: - arr = arr[0] - - iterator = iter(arr) - try: - max = iterator.next() - except StopIteration: - raise ValueError, 'max() arg is an empty sequence' - - for i in iterator: - if max < i: - max = i - return max - -def divmod(x, y): - return x//y, x%y - -def cmp(x, y): - """return 0 when x == y, -1 when x < y and 1 when x > y """ - if x < y: - return -1 - elif x == y: - return 0 - else: - return 1 - -def vars(*obj): - """return a dictionary of all the attributes currently bound in obj. If - called with no argument, return the variables bound in local scope.""" - - if len(obj) == 0: - return _caller_locals() - elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" - -def hasattr(ob, attr): - try: - getattr(ob, attr) - return True - except AttributeError: - return False - -def callable(ob): - for c in type(ob).__mro__: - if '__call__' in c.__dict__: - return True - else: - return False - -def dir(*args): - """dir([object]) -> list of strings - - Return an alphabetized list of names comprising (some of) the attributes - of the given object, and of attributes reachable from it: - - No argument: the names in the current scope. - Module object: the module attributes. - Type or class object: its attributes, and recursively the attributes of - its bases. 
- Otherwise: its attributes, its class's attributes, and recursively the - attributes of its class's base classes. - """ - if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) - if len(args) == 0: - local_names = _caller_locals().keys() # 2 stackframes away - local_names.sort() - return local_names - - import types - def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. - - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. - """ - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. - bases = klass.__bases__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict - #End _classdir - - obj = args[0] - - if isinstance(obj, types.ModuleType): - try: - result = obj.__dict__.keys() - result.sort() - return result - except AttributeError: - return [] - - elif isinstance(obj, (types.TypeType, types.ClassType)): - #Don't look at __class__, as metaclass methods would be confusing. - result = _classdir(obj).keys() - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - Dict.update(obj.__dict__) - except AttributeError: pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: pass - - ## Comment from object.c: - ## /* Merge in __members__ and __methods__ (if any). - ## XXX Would like this to go away someday; for now, it's - ## XXX needed to get at im_self etc of method objects. 
*/ - for attr in ['__members__','__methods__']: - try: - for item in getattr(obj, attr): - if isinstance(item, types.StringTypes): - Dict[item] = None - except (AttributeError, TypeError): pass - - result = Dict.keys() - result.sort() - return result - -_stringtable = {} -def intern(s): - # XXX CPython has also non-immortal interned strings - if not isinstance(s, str): - raise TypeError("intern() argument 1 must be string.") - return _stringtable.setdefault(s,s) - -def copyright(): - print 'Copyright 2003-2004 Pypy development team.\nAll rights reserved.\nFor further information see http://www.codespaek.net/pypy.\nSome materials may have a different copyright.\nIn these cases, this is explicitly noted in the source code file.' - -def license(): - print \ -""" -Copyright (c) <2003-2004> <Pypy development team> - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" - -def help(): - print "You must be joking." 
- - -# ______________________________________________________________________ -# -# Interpreter-level function definitions -# - -__interplevel__execfile('__builtin__interp.py') - -from __interplevel__ import abs, chr, len, ord, pow, repr -from __interplevel__ import hash, oct, hex, round -from __interplevel__ import getattr, setattr, delattr, iter, hash, id -from __interplevel__ import _issubtype -from __interplevel__ import compile, eval -from __interplevel__ import globals, locals, _caller_globals, _caller_locals - -# The following must be the last import from __interplevel__ because it -# overwrites the special __import__ hook with the normal one. -from __interplevel__ import __import__ - - -# ________________________________________________________________________ - -class xrange: - def __init__(self, start, stop=None, step=1): - if stop is None: - self.start = 0 - self.stop = start - else: - self.start = start - self.stop = stop - if step == 0: - raise ValueError, 'xrange() step-argument (arg 3) must not be zero' - self.step = step - - def __len__(self): - if not hasattr(self, '_len'): - slicelength = self.stop - self.start - lengthsign = cmp(slicelength, 0) - stepsign = cmp(self.step, 0) - if stepsign == lengthsign: - self._len = (slicelength - lengthsign) // self.step + 1 - else: - self._len = 0 - return self._len - - def __getitem__(self, index): - # xrange does NOT support slicing - if not isinstance(index, int): - raise TypeError, "sequence index must be integer" - len = self.__len__() - if index<0: - index += len - if 0 <= index < len: - return self.start + index * self.step - raise IndexError, "xrange object index out of range" - - def __iter__(self): - start, stop, step = self.start, self.stop, self.step - i = start - if step > 0: - while i < stop: - yield i - i+=step - else: - while i > stop: - yield i - i+=step - - -# Descriptor code, shamelessly stolen to Raymond Hettinger: -# http://users.rcn.com/python/download/Descriptor.htm -class 
property(object): - - def __init__(self, fget=None, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - self.__doc__ = doc or "" # XXX why: or "" ? - - def __get__(self, obj, objtype=None): - if obj is None: - return self - if self.fget is None: - raise AttributeError, "unreadable attribute" - return self.fget(obj) - - def __set__(self, obj, value): - if self.fset is None: - raise AttributeError, "can't set attribute" - self.fset(obj, value) - - def __delete__(self, obj): - if self.fdel is None: - raise AttributeError, "can't delete attribute" - self.fdel(obj) - - -# XXX there is an interp-level pypy.interpreter.function.StaticMethod -# XXX because __new__ needs to be a StaticMethod early. -class staticmethod(object): - - def __init__(self, f): - self.f = f - - def __get__(self, obj, objtype=None): - return self.f - - -class classmethod(object): - - def __init__(self, f): - self.f = f - - def __get__(self, obj, klass=None): - if klass is None: - klass = type(obj) - def newfunc(*args): - return self.f(klass, *args) - return newfunc - -if not hasattr(dict, 'fromkeys'): - def _fromkeys(cls, seq, value=None): - r = cls() - for s in seq: - r[s] = value - return r - - try: - dict.fromkeys = classmethod(_fromkeys) - except TypeError: - pass # Python2.2 with trivial object space - - del _fromkeys - - -# super is a modified version from Guido's tutorial -# http://www.python.org/2.2.3/descrintro.html -# it exposes the same special attributes as CPython's. 
-class super(object): - def __init__(self, typ, obj=None): - if obj is None: - objcls = None # unbound super object - elif _issubtype(type(obj), type) and _issubtype(obj, type): - objcls = obj # special case for class methods - elif _issubtype(type(obj), typ): - objcls = type(obj) # normal case - else: - objcls = getattr(obj, '__class__', type(obj)) - if not _issubtype(objcls, typ): - raise TypeError, ("super(type, obj): " - "obj must be an instance or subtype of type") - self.__thisclass__ = typ - self.__self__ = obj - self.__self_class__ = objcls - def __get__(self, obj, type=None): - ga = object.__getattribute__ - if ga(self, '__self__') is None and obj is not None: - return super(ga(self, '__thisclass__'), obj) - else: - return self - def __getattribute__(self, attr): - d = object.__getattribute__(self, '__dict__') - if attr != '__class__' and d['__self_class__'] is not None: - # we want super().__class__ to be the real class - # and we don't do anything for unbound type objects - mro = iter(d['__self_class__'].__mro__) - for cls in mro: - if cls is d['__thisclass__']: - break - # Note: mro is an iterator, so the second loop - # picks up where the first one left off! - for cls in mro: - try: - x = cls.__dict__[attr] - except KeyError: - continue - if hasattr(x, '__get__'): - x = x.__get__(d['__self__'], type(d['__self__'])) - return x - return object.__getattribute__(self, attr) # fall-back - -class complex(object): - """complex(real[, imag]) -> complex number - - Create a complex number from a real part and an optional imaginary part. 
- This is equivalent to (real + imag*1j) where imag defaults to 0.""" - PREC_REPR = 17 - PREC_STR = 12 - - # XXX this class is not well tested - - def __init__(self, real=0.0, imag=None): - if isinstance(real, str) and imag is not None: - msg = "complex() can't take second arg if first is a string" - raise TypeError, msg - - if isinstance(imag, str): - msg = "complex() second arg can't be a string" - raise TypeError, msg - - if isinstance(real, str): - real, imag = self._makeComplexFromString(real) - self.__dict__['real'] = real - self.__dict__['imag'] = imag - else: - if imag is None: - imag = 0. - self.__dict__['real'] = float(real) - self.__dict__['imag'] = float(imag) - - - def __setattr__(self, name, value): - if name in ('real', 'imag'): - raise AttributeError, "readonly attribute" - else: - raise AttributeError, "'complex' object has no attribute %s" % name - - def _makeComplexFromString(self, string): - import re - pat = re.compile(" *([\+\-]?\d*\.?\d*)([\+\-]?\d*\.?\d*)[jJ] *") - m = pat.match(string) - x, y = m.groups() - if len(y) == 1 and y in '+-': - y = y + '1.0' - x, y = map(float, [x, y]) - return x, y - - - def __description(self, precision): - if self.real != 0.: - return "(%.*g%+.*gj)"%(precision, self.real, precision, self.imag) - else: - return "%.*gj"%(precision, self.imag) - - - def __repr__(self): - return self.__description(self.PREC_REPR) - - - def __str__(self): - return self.__description(self.PREC_STR) - - - def __hash__(self): - hashreal = hash(self.real) - hashimag = hash(self.imag) - - # Note: if the imaginary part is 0, hashimag is 0 now, - # so the following returns hashreal unchanged. This is - # important because numbers of different types that - # compare equal must have the same hash value, so that - # hash(x + 0*j) must equal hash(x). 
- - return hashreal + 1000003 * hashimag - - - def __add__(self, other): - self, other = self.__coerce__(other) - real = self.real + other.real - imag = self.imag + other.imag - return complex(real, imag) - - __radd__ = __add__ - - def __sub__(self, other): - self, other = self.__coerce__(other) - real = self.real - other.real - imag = self.imag - other.imag - return complex(real, imag) - - def __rsub__(self, other): - self, other = self.__coerce__(other) - return other.__sub__(self) - - def __mul__(self, other): - self, other = self.__coerce__(other) - real = self.real*other.real - self.imag*other.imag - imag = self.real*other.imag + self.imag*other.real - return complex(real, imag) - - __rmul__ = __mul__ - - def __div__(self, other): - self, other = self.__coerce__(other) - if abs(other.real) >= abs(other.imag): - # divide tops and bottom by other.real - try: - ratio = other.imag / other.real - except ZeroDivisionError: - raise ZeroDivisionError, "complex division" - denom = other.real + other.imag * ratio - real = (self.real + self.imag * ratio) / denom - imag = (self.imag - self.real * ratio) / denom - else: - # divide tops and bottom by other.imag - assert other.imag != 0.0 - ratio = other.real / other.imag - denom = other.real * ratio + other.imag - real = (self.real * ratio + self.imag) / denom - imag = (self.imag * ratio - self.real) / denom - - return complex(real, imag) - - def __rdiv__(self, other): - self, other = self.__coerce__(other) - return other.__div__(self) - - def __floordiv__(self, other): - div, mod = self.__divmod__(other) - return div - - def __rfloordiv__(self, other): - self, other = self.__coerce__(other) - return other.__floordiv__(self) - - __truediv__ = __div__ - __rtruediv__ = __rdiv__ - - def __mod__(self, other): - div, mod = self.__divmod__(other) - return mod - - def __rmod__(self, other): - self, other = self.__coerce__(other) - return other.__mod__(self) - - def __divmod__(self, other): - self, other = self.__coerce__(other) - 
- import warnings, math - warnings.warn("complex divmod(), // and % are deprecated", DeprecationWarning) - - try: - div = self/other # The raw divisor value. - except ZeroDivisionError: - raise ZeroDivisionError, "complex remainder" - div = complex(math.floor(div.real), 0.0) - mod = self - div*other - return div, mod - - - def __pow__(self, other, mod=None): - a, b = self.__coerce__(other) - import math - - if b.real == 0. and b.imag == 0.: - real = 1. - imag = 0. - elif a.real == 0. and a.imag == 0.: - real = 0. - imag = 0. - else: - vabs = math.hypot(a.real,a.imag) - len = math.pow(vabs,b.real) - at = math.atan2(a.imag, a.real) - phase = at*b.real - if b.imag != 0.0: - len /= math.exp(at*b.imag) - phase += b.imag*math.log(vabs) - real = len*math.cos(phase) - imag = len*math.sin(phase) - - result = complex(real, imag) - if mod is not None: - result %= mod - return result - - def __rpow__(self, other, mod=None): - self, other = self.__coerce__(other) - return other.__pow__(self, mod) - - def __neg__(self): - return complex(-self.real, -self.imag) - - - def __pos__(self): - return complex(self.real, self.imag) - - - def __abs__(self): - import math - result = math.hypot(self.real, self.imag) - return float(result) - - - def __nonzero__(self): - return self.real != 0.0 or self.imag != 0.0 - - - def __coerce__(self, other): - if isinstance(other, complex): - return self, other - if isinstance(other, (int, long, float)): - return self, complex(other) - raise TypeError, "number %r coercion failed" % (type(other),) - - - def conjugate(self): - return complex(self.real, -self.imag) - - def __eq__(self, other): - self, other = self.__coerce__(other) - return self.real == other.real and self.imag == other.imag - - def __ne__(self, other): - self, other = self.__coerce__(other) - return self.real != other.real or self.imag != other.imag - - - # unsupported operations - - def __lt__(self, other): - raise TypeError, "cannot compare complex numbers using <, <=, >, >=" - - - def 
__le__(self, other): - raise TypeError, "cannot compare complex numbers using <, <=, >, >=" - - - def __gt__(self, other): - raise TypeError, "cannot compare complex numbers using <, <=, >, >=" - - - def __ge__(self, other): - raise TypeError, "cannot compare complex numbers using <, <=, >, >=" - - - def __int__(self): - raise TypeError, "can't convert complex to int; use e.g. int(abs(z))" - - - def __long__(self): - raise TypeError, "can't convert complex to long; use e.g. long(abs(z))" - - - def __float__(self): - raise TypeError, "can't convert complex to float; use e.g. float(abs(z))" - - -# ________________________________________________________________________ - -class buffer: - def __init__(self, object, offset=None, size=None): - raise NotImplementedError, "XXX nobody needs this anyway" diff --git a/pypy/module/__init__.py b/pypy/module/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/module/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/module/sysinterp.py b/pypy/module/sysinterp.py deleted file mode 100644 index 2a538db832..0000000000 --- a/pypy/module/sysinterp.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -Implementation of interpreter-level 'sys' routines. -""" -#from pypy.interpreter.module import Module -from pypy.interpreter.error import OperationError - -import sys as cpy_sys - -def hack_cpython_module(modname): - "NOT_RPYTHON. Steal a module from CPython." - cpy_module = __import__(modname, globals(), locals(), None) - return cpy_module -## # to build the dictionary of the module, we get all the objects -## # accessible as 'self.xxx'. Methods are bound. 
-## contents = [] -## for name in cpy_module.__dict__: -## # ignore names in '_xyz' -## if not name.startswith('_') or name.endswith('_'): -## value = cpy_module.__dict__[name] -## contents.append((space.wrap(name), space.wrap(value))) -## w_contents = space.newdict(contents) -## return Module(space, space.wrap(modname), w_contents) - -# ____________________________________________________________ -# -# List of built-in modules. -# It should contain the name of all the files 'module/*module.py'. -builtin_module_names = ['__builtin__', 'sys', - ] - -# Create the builtin_modules dictionary, mapping names to Module instances -builtin_modules = {} -for fn in builtin_module_names: - builtin_modules[fn] = None - -# The following built-in modules are not written in PyPy, so we -# steal them from Python. -for fn in ['posix', 'nt', 'os2', 'mac', 'ce', 'riscos', - 'itertools', 'math', '_codecs', - '_random', '_sre', 'time', '_socket', 'errno', - 'marshal', 'struct', 'binascii', 'parser']: - if fn not in builtin_modules: - try: - builtin_modules[fn] = hack_cpython_module(fn) - except ImportError: - pass - else: - builtin_module_names.append(fn) - -# ____________________________________________________________ -# -# Common data structures -w_modules = space.newdict([]) -w_warnoptions = space.newlist([]) -w_argv = space.newlist([]) -builtin_module_names.sort() -w_builtin_module_names = space.newtuple([space.wrap(fn) - for fn in builtin_module_names]) - -# Initialize the default path -import os -from pypy.interpreter import autopath -srcdir = os.path.dirname(autopath.pypydir) -appdir = os.path.join(autopath.pypydir, 'appspace') -del os, autopath # XXX for the translator. Something is very wrong around here. 
- -w_initialpath = space.newlist([space.wrap(''), space.wrap(appdir)] + - [space.wrap(p) for p in cpy_sys.path if p!= srcdir]) - -# XXX - Replace with appropriate PyPy version numbering -w_hexversion = space.wrap(cpy_sys.hexversion) -w_platform = space.wrap(cpy_sys.platform) -w_maxint = space.wrap(cpy_sys.maxint) -w_byteorder = space.wrap(cpy_sys.byteorder) - -w_stdin = space.wrap(cpy_sys.stdin) -w_stdout = space.wrap(cpy_sys.stdout) -w_stderr = space.wrap(cpy_sys.stderr) - -w_pypy_objspaceclass = space.wrap(space.__class__.__name__) - -# ____________________________________________________________ - -def setmodule(w_module): - """ put a module into the modules dict """ - w_name = space.getattr(w_module, space.wrap('__name__')) - space.setitem(w_modules, w_name, w_module) - -def setbuiltinmodule(w_module, name): - """ put a module into the modules builtin_modules dicts """ - if builtin_modules[name] is None: - builtin_modules[name] = space.unwrap(w_module) - else: - assert builtin_modules[name] is space.unwrap(w_module), ( - "trying to change the builtin-in module %r" % (name,)) - space.setitem(w_modules, space.wrap(name), w_module) - -##def displayhook(w_x): -## w = space.wrap -## if not space.is_true(space.is_(w_x, space.w_None)): -## w_stdout = space.getattr(space.w_sys, space.wrap('stdout')) -## try: -## w_repr = space.repr(w_x) -## except OperationError: -## w_repr = space.wrap("! 
__repr__ raised an exception") -## w_repr = space.add(w_repr, space.wrap("\n")) -## space.call_method(w_stdout, 'write', -## space.setitem(space.w_builtins, w('_'), w_x) - -def _getframe(): - # XXX No Argument Accepted Yet - f = space.getexecutioncontext().framestack.items[-1] - return space.wrap(f) - -def exc_info(): - operror = space.getexecutioncontext().sys_exc_info() - if operror is None: - return space.newtuple([space.w_None,space.w_None,space.w_None]) - else: - return space.newtuple([operror.w_type,operror.w_value, - space.wrap(operror.application_traceback)]) diff --git a/pypy/module/sysmodule.py b/pypy/module/sysmodule.py deleted file mode 100644 index baffd817d7..0000000000 --- a/pypy/module/sysmodule.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -The 'sys' module. -""" - -__interplevel__execfile('sysinterp.py') - -# Common data structures -from __interplevel__ import initialpath as path -from __interplevel__ import modules, argv -from __interplevel__ import warnoptions, builtin_module_names - -# Objects from interpreter-level -from __interplevel__ import stdin, stdout, stderr, maxint -from __interplevel__ import hexversion, platform, byteorder -from __interplevel__ import pypy_objspaceclass - -# Functions from interpreter-level -from __interplevel__ import _getframe, exc_info - -# Dummy -executable = '' -prefix = '' -version = '0.5.0 (not released yet)' -ps1 = '>>>> ' -ps2 = '.... ' - -# XXX not called by the core yet -def excepthook(exctype, value, traceback): - from traceback import print_exception - print_exception(exctype, value, traceback) - -def exit(exitcode=0): - raise SystemExit(exitcode) - -def displayhook(obj): - if obj is not None: - __builtins__['_'] = obj - # NB. this is slightly more complicated in CPython, - # see e.g. 
the difference with >>> print 5,; 8 - print `obj` diff --git a/pypy/module/test/__init__.py b/pypy/module/test/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/module/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/module/test/applevel_in_cpython.py b/pypy/module/test/applevel_in_cpython.py deleted file mode 100644 index f7e7974d0c..0000000000 --- a/pypy/module/test/applevel_in_cpython.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Hack to (partially) import the app-level parts of PyPy modules -directly in CPython, when the goal is to test these app-level parts and -not necessarily PyPy's interpretation of them. -""" - -import autopath, new, sys - - -def applevel_in_cpython(modulename): - try: - real_mod = __import__(modulename) - except ImportError: - real_mod = None - - class DunnoType: - def __repr__(self): - return "<this would come from interp-level if we had one>" - Dunno = DunnoType() - - def ouack_eval(s): - return { - 'space.w_None': None, - 'space.w_False': False, - 'space.w_True': True, - 'space.w_type': type, - 'space.w_object': object, - 'space.wrap(unicode)': unicode, - 'space.wrap(file)': file, - }.get(s, Dunno) - - def ouack_execfile(s): - pass - - class OuackModule: - def __getattr__(self, name): - return getattr(real_mod, name, Dunno) - ouack_module = OuackModule() - ouack_module._issubtype = issubclass - - - from os.path import join - filename = join(autopath.pypydir, 'module', '%smodule.py' % modulename) - mod = new.module('applevel_in_cpython:%s' % modulename) - mod.__dict__.update({ - '__file__': filename, - '__interplevel__eval': ouack_eval, - '__interplevel__execfile': ouack_execfile, - }) - sys.modules['__interplevel__'] = ouack_module - try: - execfile(filename, mod.__dict__) - finally: - del sys.modules['__interplevel__'] - return mod diff --git a/pypy/module/test/autopath.py b/pypy/module/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/module/test/autopath.py 
+++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' 
in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/module/test/impsubdir/a.py b/pypy/module/test/impsubdir/a.py deleted file mode 100644 index eaa2b0585f..0000000000 --- a/pypy/module/test/impsubdir/a.py +++ /dev/null @@ -1,3 +0,0 @@ -# a.py -imamodule = 1 -inpackage = 0 diff --git a/pypy/module/test/impsubdir/ambig.py b/pypy/module/test/impsubdir/ambig.py deleted file mode 100644 index 17904c4d80..0000000000 --- 
a/pypy/module/test/impsubdir/ambig.py +++ /dev/null @@ -1 +0,0 @@ -imamodule = 1 diff --git a/pypy/module/test/impsubdir/ambig/__init__.py b/pypy/module/test/impsubdir/ambig/__init__.py deleted file mode 100644 index 577a0d7c39..0000000000 --- a/pypy/module/test/impsubdir/ambig/__init__.py +++ /dev/null @@ -1 +0,0 @@ -imapackage = 1 diff --git a/pypy/module/test/impsubdir/b.py b/pypy/module/test/impsubdir/b.py deleted file mode 100644 index 241214d2b6..0000000000 --- a/pypy/module/test/impsubdir/b.py +++ /dev/null @@ -1,3 +0,0 @@ -# b.py -imamodule = 1 -inpackage = 0 diff --git a/pypy/module/test/impsubdir/pkg/__init__.py b/pypy/module/test/impsubdir/pkg/__init__.py deleted file mode 100644 index 5bb534f795..0000000000 --- a/pypy/module/test/impsubdir/pkg/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# package diff --git a/pypy/module/test/impsubdir/pkg/a.py b/pypy/module/test/impsubdir/pkg/a.py deleted file mode 100644 index bc2a1a0b69..0000000000 --- a/pypy/module/test/impsubdir/pkg/a.py +++ /dev/null @@ -1,3 +0,0 @@ -# a.py -imamodule = 1 -inpackage = 1 diff --git a/pypy/module/test/impsubdir/pkg/abs_b.py b/pypy/module/test/impsubdir/pkg/abs_b.py deleted file mode 100644 index 0916033493..0000000000 --- a/pypy/module/test/impsubdir/pkg/abs_b.py +++ /dev/null @@ -1 +0,0 @@ -import b diff --git a/pypy/module/test/impsubdir/pkg/abs_x_y.py b/pypy/module/test/impsubdir/pkg/abs_x_y.py deleted file mode 100644 index 3301f468fa..0000000000 --- a/pypy/module/test/impsubdir/pkg/abs_x_y.py +++ /dev/null @@ -1 +0,0 @@ -import x.y diff --git a/pypy/module/test/impsubdir/pkg/pkg1/__init__.py b/pypy/module/test/impsubdir/pkg/pkg1/__init__.py deleted file mode 100644 index 5bb534f795..0000000000 --- a/pypy/module/test/impsubdir/pkg/pkg1/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# package diff --git a/pypy/module/test/impsubdir/pkg/pkg1/a.py b/pypy/module/test/impsubdir/pkg/pkg1/a.py deleted file mode 100644 index 1a7921546a..0000000000 --- a/pypy/module/test/impsubdir/pkg/pkg1/a.py 
+++ /dev/null @@ -1 +0,0 @@ -# a.py diff --git a/pypy/module/test/impsubdir/pkg/relative_a.py b/pypy/module/test/impsubdir/pkg/relative_a.py deleted file mode 100644 index b8d7b3cc11..0000000000 --- a/pypy/module/test/impsubdir/pkg/relative_a.py +++ /dev/null @@ -1 +0,0 @@ -import a diff --git a/pypy/module/test/impsubdir/pkg_r/__init__.py b/pypy/module/test/impsubdir/pkg_r/__init__.py deleted file mode 100644 index c51def1524..0000000000 --- a/pypy/module/test/impsubdir/pkg_r/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# pkg diff --git a/pypy/module/test/impsubdir/pkg_r/inpkg.py b/pypy/module/test/impsubdir/pkg_r/inpkg.py deleted file mode 100644 index 3301f468fa..0000000000 --- a/pypy/module/test/impsubdir/pkg_r/inpkg.py +++ /dev/null @@ -1 +0,0 @@ -import x.y diff --git a/pypy/module/test/impsubdir/pkg_r/x/__init__.py b/pypy/module/test/impsubdir/pkg_r/x/__init__.py deleted file mode 100644 index c51def1524..0000000000 --- a/pypy/module/test/impsubdir/pkg_r/x/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# pkg diff --git a/pypy/module/test/impsubdir/pkg_relative_a/__init__.py b/pypy/module/test/impsubdir/pkg_relative_a/__init__.py deleted file mode 100644 index b763308369..0000000000 --- a/pypy/module/test/impsubdir/pkg_relative_a/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# pkg -import a diff --git a/pypy/module/test/impsubdir/pkg_relative_a/a.py b/pypy/module/test/impsubdir/pkg_relative_a/a.py deleted file mode 100644 index bc2a1a0b69..0000000000 --- a/pypy/module/test/impsubdir/pkg_relative_a/a.py +++ /dev/null @@ -1,3 +0,0 @@ -# a.py -imamodule = 1 -inpackage = 1 diff --git a/pypy/module/test/impsubdir/pkg_substituted/__init__.py b/pypy/module/test/impsubdir/pkg_substituted/__init__.py deleted file mode 100644 index e966b61791..0000000000 --- a/pypy/module/test/impsubdir/pkg_substituted/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# __init__ diff --git a/pypy/module/test/impsubdir/pkg_substituted/mod.py b/pypy/module/test/impsubdir/pkg_substituted/mod.py deleted file 
mode 100644 index e69de29bb2..0000000000 --- a/pypy/module/test/impsubdir/pkg_substituted/mod.py +++ /dev/null diff --git a/pypy/module/test/impsubdir/pkg_substituting/__init__.py b/pypy/module/test/impsubdir/pkg_substituting/__init__.py deleted file mode 100644 index 5399aeb92b..0000000000 --- a/pypy/module/test/impsubdir/pkg_substituting/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -import sys, pkg_substituted -sys.modules[__name__] = pkg_substituted diff --git a/pypy/module/test/impsubdir/x/__init__.py b/pypy/module/test/impsubdir/x/__init__.py deleted file mode 100644 index c51def1524..0000000000 --- a/pypy/module/test/impsubdir/x/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# pkg diff --git a/pypy/module/test/impsubdir/x/y.py b/pypy/module/test/impsubdir/x/y.py deleted file mode 100644 index 9d1b1eb893..0000000000 --- a/pypy/module/test/impsubdir/x/y.py +++ /dev/null @@ -1 +0,0 @@ -# y.py diff --git a/pypy/module/test/test_apply.py b/pypy/module/test/test_apply.py deleted file mode 100644 index 4a0d5f3b98..0000000000 --- a/pypy/module/test/test_apply.py +++ /dev/null @@ -1,27 +0,0 @@ -import autopath - - -# This is a very trivial series of tests. If apply is subtlely broken, -# we will have to find out some other way. 
- -class AppTestApply: - - def test_trivial_listonly(self): - def mymin(*args): - return min(list(args)) - - assert apply(mymin, [-1,-2,-3,-4]) == -4 - - def test_trivial_dictonly(self): - def mymin(*arr, **kwargs): - return min(list(arr) + kwargs.values()) - assert apply(mymin, - [], {'null' : 0, 'one': 1, 'two' : 2}) == ( - 0) - def test_trivial(self): - def mymin(*arr, **kwargs): - return min(list(arr) + kwargs.values()) - assert apply(mymin, - [-1,-2,-3,-4], - {'null' : 0, 'one': 1, 'two' : 2}) == ( - (-4)) diff --git a/pypy/module/test/test_builtin.py b/pypy/module/test/test_builtin.py deleted file mode 100644 index c2f7436e69..0000000000 --- a/pypy/module/test/test_builtin.py +++ /dev/null @@ -1,290 +0,0 @@ -import autopath - - -class AppTestBuiltinApp: - def test_import(self): - m = __import__('pprint') - assert m.pformat({}) == '{}' - assert m.__name__ == "pprint" - raises(ImportError, __import__, 'spamspam') - raises(TypeError, __import__, 1, 2, 3, 4) - - def test_chr(self): - assert chr(65) == 'A' - raises(ValueError, chr, -1) - raises(TypeError, chr, 'a') - - def test_intern(self): - raises(TypeError, intern) - raises(TypeError, intern, 1) - s = "never interned before" - s2 = intern(s) - assert s == s2 - s3 = s.swapcase() - assert s3 != s2 - s4 = s3.swapcase() - assert intern(s4) is s2 - - def test_globals(self): - d = {"foo":"bar"} - exec "def f(): return globals()" in d - d2 = d["f"]() - assert d2 is d - - def test_locals(self): - def f(): - return locals() - def g(c=0, b=0, a=0): - return locals() - assert f() == {} - assert g() == {'a':0, 'b':0, 'c':0} - - def test_dir(self): - def f(): - return dir() - def g(c=0, b=0, a=0): - return dir() - def nosp(x): return [y for y in x if y[0]!='_'] - assert f() == [] - assert g() == ['a', 'b', 'c'] - class X: pass - assert nosp(dir(X)) == [] - class X: - a = 23 - c = 45 - b = 67 - assert nosp(dir(X)) == ['a', 'b', 'c'] - - def test_vars(self): - def f(): - return vars() - def g(c=0, b=0, a=0): - return vars() 
- assert f() == {} - assert g() == {'a':0, 'b':0, 'c':0} - - def test_getattr(self): - class a: - i = 5 - assert getattr(a, 'i') == 5 - raises(AttributeError, getattr, a, 'k') - assert getattr(a, 'k', 42) == 42 - - def test_sum(self): - assert sum([]) ==0 - assert sum([42]) ==42 - assert sum([1,2,3]) ==6 - assert sum([],5) ==5 - assert sum([1,2,3],4) ==10 - - def test_type_selftest(self): - assert type(type) is type - - def test_iter_sequence(self): - raises(TypeError,iter,3) - x = iter(['a','b','c']) - assert x.next() =='a' - assert x.next() =='b' - assert x.next() =='c' - raises(StopIteration,x.next) - - def test_iter___iter__(self): - # This test assumes that dict.keys() method returns keys in - # the same order as dict.__iter__(). - # Also, this test is not as explicit as the other tests; - # it tests 4 calls to __iter__() in one assert. It could - # be modified if better granularity on the assert is required. - mydict = {'a':1,'b':2,'c':3} - assert list(iter(mydict)) ==mydict.keys() - - def test_iter_callable_sentinel(self): - class count(object): - def __init__(self): - self.value = 0 - def __call__(self): - self.value += 1 - return self.value - # XXX Raising errors is quite slow -- - # uncomment these lines when fixed - #self.assertRaises(TypeError,iter,3,5) - #self.assertRaises(TypeError,iter,[],5) - #self.assertRaises(TypeError,iter,{},5) - x = iter(count(),3) - assert x.next() ==1 - assert x.next() ==2 - raises(StopIteration,x.next) - - def test_enumerate(self): - seq = range(2,4) - enum = enumerate(seq) - assert enum.next() == (0, 2) - assert enum.next() == (1, 3) - raises(StopIteration, enum.next) - - def test_xrange_args(self): - x = xrange(2) - assert x.start == 0 - assert x.stop == 2 - assert x.step == 1 - - x = xrange(2,10,2) - assert x.start == 2 - assert x.stop == 10 - assert x.step == 2 - - raises(ValueError, xrange, 0, 1, 0) - - def test_xrange_up(self): - x = xrange(2) - assert x.start == 0 - assert x.stop == 2 - assert x.step == 1 - - iter_x = 
iter(x) - assert iter_x.next() == 0 - assert iter_x.next() == 1 - raises(StopIteration, iter_x.next) - - def test_xrange_down(self): - x = xrange(4,2,-1) - - iter_x = iter(x) - assert iter_x.next() == 4 - assert iter_x.next() == 3 - raises(StopIteration, iter_x.next) - - def test_xrange_has_type_identity(self): - assert type(xrange(1)) == type(xrange(1)) - - def test_xrange_len(self): - x = xrange(33) - assert len(x) == 33 - x = xrange(33,0,-1) - assert len(x) == 33 - x = xrange(33,0) - assert len(x) == 0 - x = xrange(0,33) - assert len(x) == 33 - x = xrange(0,33,-1) - assert len(x) == 0 - x = xrange(0,33,2) - assert len(x) == 17 - x = xrange(0,32,2) - assert len(x) == 16 - - def test_xrange_indexing(self): - x = xrange(0,33,2) - assert x[7] == 14 - assert x[-7] == 20 - raises(IndexError, x.__getitem__, 17) - raises(IndexError, x.__getitem__, -18) - raises(TypeError, x.__getitem__, slice(0,3,1)) - - def test_cmp(self): - assert cmp(9,9) == 0 - assert cmp(0,9) < 0 - assert cmp(9,0) > 0 - - def test_return_None(self): - class X: pass - x = X() - assert setattr(x, 'x', 11) == None - assert delattr(x, 'x') == None - # To make this test, we need autopath to work in application space. - #self.assertEquals(execfile('emptyfile.py'), None) - - def test_divmod(self): - assert divmod(15,10) ==(1,5) - - def test_callable(self): - class Call: - def __call__(self, a): - return a+2 - assert not not callable(Call()), ( - "Builtin function 'callable' misreads callable object") - assert callable(int), ( - "Builtin function 'callable' misreads int") - - def test_uncallable(self): - # XXX TODO: I made the NoCall class explicitly newstyle to try and - # remedy the failure in this test observed when running this with - # the trivial objectspace, but the test _still_ fails then (it - # doesn't fail with the standard objectspace, though). 
- class NoCall(object): - pass - a = NoCall() - assert not callable(a), ( - "Builtin function 'callable' misreads uncallable object") - a.__call__ = lambda: "foo" - assert not callable(a), ( - "Builtin function 'callable' tricked by instance-__call__") - - def test_hash(self): - assert hash(23) == hash(23) - assert hash(2.3) == hash(2.3) - assert hash('23') == hash("23") - assert hash((23,)) == hash((23,)) - assert hash(22) != hash(23) - raises(TypeError, hash, []) - raises(TypeError, hash, {}) - - def test_compile(self): - co = compile('1+2', '?', 'eval') - assert eval(co) == 3 - raises(SyntaxError, compile, '-', '?', 'eval') - raises(ValueError, compile, '"\\xt"', '?', 'eval') - raises(ValueError, compile, '1+2', '?', 'maybenot') - raises(TypeError, compile, '1+2', 12, 34) - - def test_isinstance(self): - assert isinstance(5, int) - assert isinstance(5, object) - assert not isinstance(5, float) - assert isinstance(True, (int, float)) - assert not isinstance(True, (type, float)) - assert isinstance(True, ((type, float), bool)) - raises(TypeError, isinstance, 5, 6) - raises(TypeError, isinstance, 5, (float, 6)) - - def test_issubclass(self): - assert issubclass(int, int) - assert issubclass(int, object) - assert not issubclass(int, float) - assert issubclass(bool, (int, float)) - assert not issubclass(bool, (type, float)) - assert issubclass(bool, ((type, float), bool)) - raises(TypeError, issubclass, 5, int) - raises(TypeError, issubclass, int, 6) - raises(TypeError, issubclass, int, (float, 6)) - - -class TestInternal: - - def setup_method(self,method): - space = self.space - - def get_builtin(self, name): - w = self.space.wrap - w_builtins = self.space.w_builtins - w_obj = self.space.getitem(w_builtins, w(name)) - return w_obj - - def test_execfile(self): - # we need cpython's tempfile currently to test - from tempfile import mktemp - fn = mktemp() - f = open(fn, 'w') - print >>f, "i=42" - f.close() - - try: - w_execfile = self.get_builtin('execfile') - space = 
self.space - w_dict = space.newdict([]) - self.space.call_function(w_execfile, - space.wrap(fn), w_dict, space.w_None) - w_value = space.getitem(w_dict, space.wrap('i')) - assert self.space.eq_w(w_value, space.wrap(42)) - finally: - import os - os.remove(fn) diff --git a/pypy/module/test/test_filter.py b/pypy/module/test/test_filter.py deleted file mode 100644 index 0e0cb25f40..0000000000 --- a/pypy/module/test/test_filter.py +++ /dev/null @@ -1,37 +0,0 @@ -import autopath - -# trivial functions for testing - -class AppTestFilter: - def test_filter_no_arguments(self): - raises(TypeError, filter) - - def test_filter_no_function_no_seq(self): - raises(TypeError, filter, None) - - def test_filter_function_no_seq(self): - raises(TypeError, filter, lambda x: x>3) - - def test_filter_function_too_many_args(self): - raises(TypeError, filter, lambda x: x>3, [1], [2]) - - def test_filter_no_function_list(self): - assert filter(None, [1, 2, 3]) == [1, 2, 3] - - def test_filter_no_function_tuple(self): - assert filter(None, (1, 2, 3)) == (1, 2, 3) - - def test_filter_no_function_string(self): - assert filter(None, 'mystring') == 'mystring' - - def test_filter_no_function_with_bools(self): - assert filter(None, (True, False, True)) == (True, True) - - def test_filter_list(self): - assert filter(lambda x: x>3, [1, 2, 3, 4, 5]) == [4, 5] - - def test_filter_tuple(self): - assert filter(lambda x: x>3, (1, 2, 3, 4, 5)) == (4, 5) - - def test_filter_string(self): - assert filter(lambda x: x>'a', 'xyzabcd') == 'xyzbcd' diff --git a/pypy/module/test/test_functional.py b/pypy/module/test/test_functional.py deleted file mode 100644 index 2f365f5030..0000000000 --- a/pypy/module/test/test_functional.py +++ /dev/null @@ -1,91 +0,0 @@ -import autopath - - -class AppTestMap: - - def test_trivial_map_one_seq(self): - assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] - - def test_trivial_map_two_seq(self): - assert map(lambda x,y: x+y, - [1, 2, 3, 4],[1, 2, 3, 4]) == ( - [2, 4, 6, 8]) 
- - def test_trivial_map_sizes_dont_match_and_should(self): - raises(TypeError, map, lambda x,y: x+y, [1, 2, 3, 4], [1, 2, 3]) - - def test_trivial_map_no_arguments(self): - raises(TypeError, map) - - def test_trivial_map_no_function_no_seq(self): - raises(TypeError, map, None) - - def test_trivial_map_no_fuction_one_seq(self): - assert map(None, [1, 2, 3]) == [1, 2, 3] - - def test_trivial_map_no_function(self): - assert map(None, [1,2,3], [4,5,6], [7,8], [1]) == ( - [(1, 4, 7, 1), (2, 5, 8, None), (3, 6, None, None)]) - - def test_map_identity1(self): - a = ['1', 2, 3, 'b', None] - b = a[:] - assert map(lambda x: x, a) == a - assert a == b - - def test_map_None(self): - a = ['1', 2, 3, 'b', None] - b = a[:] - assert map(None, a) == a - assert a == b - - def test_map_badoperation(self): - a = ['1', 2, 3, 'b', None] - raises(TypeError, map, lambda x: x+1, a) - - def test_map_multiply_identity(self): - a = ['1', 2, 3, 'b', None] - b = [ 2, 3, 4, 5, 6] - assert map(None, a, b) == [('1', 2), (2, 3), (3, 4), ('b', 5), (None, 6)] - - def test_map_multiply(self): - a = [1, 2, 3, 4] - b = [0, 1, 1, 1] - assert map(lambda x, y: x+y, a, b) == [1, 2, 4, 5] - - def test_map_multiply(self): - a = [1, 2, 3, 4, 5] - b = [] - assert map(lambda x, y: x, a, b) == a - -class AppTestZip: - def test_one_list(self): - assert zip([1,2,3]) == [(1,), (2,), (3,)] - - def test_three_lists(self): - assert zip([1,2,3], [1,2], [1,2,3]) == [(1,1,1), (2,2,2)] - -class AppTestReduce: - def test_None(self): - raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) - - def test_sum(self): - assert reduce(lambda x, y: x+y, [1,2,3,4], 0) == 10 - assert reduce(lambda x, y: x+y, [1,2,3,4]) == 10 - - def test_minus(self): - assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 - assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 - -class AppTestFilter: - def test_None(self): - assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] - - def test_return_type(self): - txt = "This is a test text" - assert 
filter(None, txt) == txt - tup = ("a", None, 0, [], 1) - assert filter(None, tup) == ("a", 1) - - def test_function(self): - assert filter(lambda x: x != "a", "a small text") == " smll text" diff --git a/pypy/module/test/test_import.py b/pypy/module/test/test_import.py deleted file mode 100644 index 7afc3f938a..0000000000 --- a/pypy/module/test/test_import.py +++ /dev/null @@ -1,144 +0,0 @@ -import autopath -from pypy.interpreter import gateway -import os - - -def _setup(dn=os.path.abspath(os.path.join(os.path.dirname(__file__), 'impsubdir'))): - import sys - sys.path.append(dn) - return sys.modules.copy() - -_setup = gateway.app2interp_temp(_setup,'setup') - -def _teardown(saved_modules): - import sys - sys.path.pop() - sys.modules.clear() - sys.modules.update(saved_modules) - -_teardown = gateway.app2interp_temp(_teardown,'teardown') - -class AppTestImport: - - def setup_class(cls): # interpreter-level - cls.saved_modules = _setup(cls.space) - - def teardown_class(cls): # interpreter-level - _teardown(cls.space,cls.saved_modules) - - def test_import_bare_dir_fails(self): - def imp(): - import notapackage - raises(ImportError,imp) - - def test_import_sys(self): - import sys - - def test_import_a(self): - import sys - import a - assert a == sys.modules.get('a') - - def test_import_a_cache(self): - import sys - import a - a0 = a - import a - assert a == a0 - - def test_import_pkg(self): - import sys - import pkg - assert pkg == sys.modules.get('pkg') - - def test_import_dotted(self): - import sys - import pkg.a - assert pkg == sys.modules.get('pkg') - assert pkg.a == sys.modules.get('pkg.a') - - def test_import_dotted_cache(self): - import sys - import pkg.a - assert pkg == sys.modules.get('pkg') - assert pkg.a == sys.modules.get('pkg.a') - pkg0 = pkg - pkg_a0 = pkg.a - import pkg.a - assert pkg == pkg0 - assert pkg.a == pkg_a0 - - def test_import_dotted2(self): - import sys - import pkg.pkg1.a - assert pkg == sys.modules.get('pkg') - assert pkg.pkg1 == 
sys.modules.get('pkg.pkg1') - assert pkg.pkg1.a == sys.modules.get('pkg.pkg1.a') - - def test_import_ambig(self): - import sys - import ambig - assert ambig == sys.modules.get('ambig') - assert hasattr(ambig,'imapackage') - - def test_from_a(self): - import sys - from a import imamodule - assert 'a' in sys.modules - assert imamodule == 1 - - def test_from_dotted(self): - import sys - from pkg.a import imamodule - assert 'pkg' in sys.modules - assert 'pkg.a' in sys.modules - assert imamodule == 1 - - def test_from_pkg_import_module(self): - import sys - from pkg import a - assert 'pkg' in sys.modules - assert 'pkg.a' in sys.modules - pkg = sys.modules.get('pkg') - assert a == pkg.a - aa = sys.modules.get('pkg.a') - assert a == aa - - def test_import_relative(self): - from pkg import relative_a - assert relative_a.a.inpackage ==1 - - def test_import_relative_back_to_absolute(self): - from pkg import abs_b - assert abs_b.b.inpackage ==0 - import sys - assert sys.modules.get('pkg.b') ==None - - def test_import_pkg_relative(self): - import pkg_relative_a - assert pkg_relative_a.a.inpackage ==1 - - def test_import_relative_partial_success(self): - def imp(): - import pkg_r.inpkg - raises(ImportError,imp) - - def test_import_Globals_Are_None(self): - import sys - m = __import__('sys') - assert sys == m - n = __import__('sys', None, None, ['']) - assert sys == n - - def test_import_relative_back_to_absolute2(self): - from pkg import abs_x_y - import sys - assert abs_x_y.x.__name__ =='x' - assert abs_x_y.x.y.__name__ =='x.y' - # grrr XXX not needed probably... 
- #self.assertEquals(sys.modules.get('pkg.x'),None) - #self.assert_('pkg.x.y' not in sys.modules) - - def test_substituting_import(self): - from pkg_substituting import mod - assert mod.__name__ =='pkg_substituting.mod' diff --git a/pypy/module/test/test_minmax.py b/pypy/module/test/test_minmax.py deleted file mode 100644 index e8b6a7d04b..0000000000 --- a/pypy/module/test/test_minmax.py +++ /dev/null @@ -1,67 +0,0 @@ -import autopath - -class AppTestMin: - - def test_min_notseq(self): - raises(TypeError, min, 1) - - def test_min_usual(self): - assert min(1, 2, 3) == 1 - - def test_min_floats(self): - assert min(0.1, 2.7, 14.7) == 0.1 - - # write fixed point if that becomes a type. - - def test_min_chars(self): - assert min('a', 'b', 'c') == 'a' - - # write a unicode test when unicode works. - - def test_min_strings(self): - assert min('aaa', 'bbb', 'c') == 'aaa' - - # write an imaginary test when we have complex numbers - - # XXX we have no mixed comparison operator yet on StdObjSpace - def _test_min_mixed(self): - assert min('1', 2, 3, 'aa') == 2 - - def test_min_noargs(self): - raises(TypeError, min) - - def test_min_empty(self): - raises(ValueError, min, []) - -class AppTestMax: - - def test_max_notseq(self): - raises(TypeError, max, 1) - - def test_max_usual(self): - assert max(1, 2, 3) == 3 - - def test_max_floats(self): - assert max(0.1, 2.7, 14.7) == 14.7 - - # write fixed point if that becomes a type. - - def test_max_chars(self): - assert max('a', 'b', 'c') == 'c' - - # write a unicode test when unicode works. 
- - def test_max_strings(self): - assert max('aaa', 'bbb', 'c') == 'c' - - # write an imaginary test when we have complex numbers - - # XXX we have no mixed comparison operator yet on StdObjSpace - def _test_max_mixed(self): - assert max('1', 2, 3, 'aa') == 'aa' - - def test_max_noargs(self): - raises(TypeError, max) - - def test_max_empty(self): - raises(ValueError, max, []) diff --git a/pypy/module/test/test_newstyleclasses.py b/pypy/module/test/test_newstyleclasses.py deleted file mode 100644 index 18aa278050..0000000000 --- a/pypy/module/test/test_newstyleclasses.py +++ /dev/null @@ -1,71 +0,0 @@ -import autopath - - -class AppTestBuiltinApp: - def test_staticmethod(self): - class C: - def f(a, b): - return a+b - f = staticmethod(f) - class D(C): - pass - - c = C() - d = D() - assert c.f("abc", "def") == "abcdef" - assert C.f("abc", "def") == "abcdef" - assert d.f("abc", "def") == "abcdef" - assert D.f("abc", "def") == "abcdef" - - def test_classmethod(self): - class C: - def f(cls, stuff): - return cls, stuff - f = classmethod(f) - class D(C): - pass - - c = C() - d = D() - assert c.f("abc") == (C, "abc") - assert C.f("abc") == (C, "abc") - assert d.f("abc") == (D, "abc") - assert D.f("abc") == (D, "abc") - - def test_property_simple(self): - - class a(object): - def _get(self): return 42 - def _set(self, value): raise AttributeError - def _del(self): raise KeyError - name = property(_get, _set, _del) - a1 = a() - assert a1.name == 42 - raises(AttributeError, setattr, a1, 'name', 42) - raises(KeyError, delattr, a1, 'name') - - def test_super(self): - class A(object): - def f(self): - return 'A' - class B(A): - def f(self): - return 'B' + super(B,self).f() - class C(A): - def f(self): - return 'C' + super(C,self).f() - class D(B, C): - def f(self): - return 'D' + super(D,self).f() - d = D() - assert d.f() == "DBCA" - assert D.__mro__ == (D, B, C, A, object) - - def test_super_metaclass(self): - class xtype(type): - def __init__(self, name, bases, dict): - 
super(xtype, self).__init__(name, bases, dict) - A = xtype('A', (), {}) - assert isinstance(A, xtype) - a = A() - assert isinstance(a, A) diff --git a/pypy/module/test/test_range.py b/pypy/module/test/test_range.py deleted file mode 100644 index 1b099cd660..0000000000 --- a/pypy/module/test/test_range.py +++ /dev/null @@ -1,67 +0,0 @@ -import autopath - -class AppTestRange: - - def test_range_toofew(self): - raises(TypeError, range) - - def test_range_toomany(self): - raises(TypeError, range, 1, 2, 3, 4) - - def test_range_one(self): - assert range(1) == [0] - - def test_range_posstartisstop(self): - assert range(1, 1) == [] - - def test_range_negstartisstop(self): - assert range(-1, -1) == [] - - - def test_range_zero(self): - assert range(0) == [] - - def test_range_twoargs(self): - assert range(1, 2) == [1] - - def test_range_decreasingtwoargs(self): - assert range(3, 1) == [] - - def test_range_negatives(self): - assert range(-3) == [] - - def test_range_decreasing_negativestep(self): - assert range(5, -2, -1) == [5, 4, 3, 2, 1, 0 , -1] - - def test_range_posfencepost1(self): - assert range (1, 10, 3) == [1, 4, 7] - - def test_range_posfencepost2(self): - assert range (1, 11, 3) == [1, 4, 7, 10] - - def test_range_posfencepost3(self): - assert range (1, 12, 3) == [1, 4, 7, 10] - - def test_range_negfencepost1(self): - assert range (-1, -10, -3) == [-1, -4, -7] - - def test_range_negfencepost2(self): - assert range (-1, -11, -3) == [-1, -4, -7, -10] - - def test_range_negfencepost3(self): - assert range (-1, -12, -3) == [-1, -4, -7, -10] - - def test_range_decreasing_negativelargestep(self): - assert range(5, -2, -3) == [5, 2, -1] - - def test_range_increasing_positivelargestep(self): - assert range(-5, 2, 3) == [-5, -2, 1] - - def test_range_zerostep(self): - raises(ValueError, range, 1, 5, 0) - -""" - def test_range_float(self): - "How CPython does it - UGLY, ignored for now." 
- assert range(0.1, 2.0, 1.1) == [0, 1] - """ diff --git a/pypy/module/test/test_reduce.py b/pypy/module/test/test_reduce.py deleted file mode 100644 index 7058777656..0000000000 --- a/pypy/module/test/test_reduce.py +++ /dev/null @@ -1,13 +0,0 @@ -import autopath - -class AppTestReduce: - def test_None(self): - raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) - - def test_sum(self): - assert reduce(lambda x, y: x+y, [1,2,3,4], 0) == 10 - assert reduce(lambda x, y: x+y, [1,2,3,4]) == 10 - - def test_minus(self): - assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 - assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 diff --git a/pypy/module/test/test_sysmodule.py b/pypy/module/test/test_sysmodule.py deleted file mode 100644 index 678141719d..0000000000 --- a/pypy/module/test/test_sysmodule.py +++ /dev/null @@ -1,87 +0,0 @@ -import autopath - -class TestSysTests: - def setup_method(self,method): - self.sys_w = self.space.get_builtin_module("sys") - def test_stdout_exists(self): - s = self.space - assert s.is_true(s.getattr(self.sys_w, s.wrap("stdout"))) - -class AppTestAppSysTests: - def test_path_exists(self): - import sys - assert hasattr(sys, 'path'), "sys.path gone missing" - def test_modules_exists(self): - import sys - assert hasattr(sys, 'modules'), "sys.modules gone missing" - def test_dict_exists(self): - import sys - assert hasattr(sys, '__dict__'), "sys.__dict__ gone missing" - def test_name_exists(self): - import sys - assert hasattr(sys, '__name__'), "sys.__name__ gone missing" - def test_builtin_module_names_exists(self): - import sys - assert hasattr(sys, 'builtin_module_names'), ( - "sys.builtin_module_names gone missing") - def test_warnoptions_exists(self): - import sys - assert hasattr(sys, 'warnoptions'), ( - "sys.warnoptions gone missing") - def test_hexversion_exists(self): - import sys - assert hasattr(sys, 'hexversion'), ( - "sys.hexversion gone missing") - def test_platform_exists(self): - import sys - assert hasattr(sys, 'platform'), 
"sys.platform gone missing" - - def test_sys_in_modules(self): - import sys - modules = sys.modules - assert 'sys' in modules, ( "An entry for sys " - "is not in sys.modules.") - sys2 = sys.modules['sys'] - assert sys is sys2, "import sys is not sys.modules[sys]." - def test_builtin_in_modules(self): - import sys - modules = sys.modules - assert '__builtin__' in modules, ( "An entry for __builtin__ " - "is not in sys.modules.") - import __builtin__ - builtin2 = sys.modules['__builtin__'] - assert __builtin__ is builtin2, ( "import __builtin__ " - "is not sys.modules[__builtin__].") - def test_builtin_module_names(self): - import sys - names = sys.builtin_module_names - assert 'sys' in names, ( - "sys is not listed as a builtin module.") - assert '__builtin__' in names, ( - "__builtin__ is not listed as a builtin module.") - - def test_sys_exc_info(self): - try: - raise Exception - except Exception,e: - import sys - exc_type,exc_val,tb = sys.exc_info() - try: - raise Exception # 5 lines below the previous one - except Exception,e2: - exc_type2,exc_val2,tb2 = sys.exc_info() - assert exc_type ==Exception - assert exc_val ==e - assert exc_type2 ==Exception - assert exc_val2 ==e2 - assert tb2.tb_lineno - tb.tb_lineno == 5 - - def test_exc_info_normalization(self): - import sys - try: - 1/0 - except ZeroDivisionError: - etype, val, tb = sys.exc_info() - assert isinstance(val, etype) - else: - raise AssertionError, "ZeroDivisionError not caught" diff --git a/pypy/module/test/test_vars.py b/pypy/module/test/test_vars.py deleted file mode 100644 index c6b03c0b27..0000000000 --- a/pypy/module/test/test_vars.py +++ /dev/null @@ -1,18 +0,0 @@ -import autopath - -class AppTestVars: - - def _test_vars_no_arguments(self): - assert vars() == locals() - - def _test_vars_too_many_arguments(self): - raises(TypeError, vars, 0, 1) - - def _test_vars_correct_arguments(self): - class a: - def __init__(self): - self.res = 42 - assert vars(a) == a.__dict__ - a1 = a() - assert vars(a1) == 
a1.__dict__ - assert vars(a1).get('res') ==42 diff --git a/pypy/module/test/test_zip.py b/pypy/module/test/test_zip.py deleted file mode 100644 index 362d7d36e7..0000000000 --- a/pypy/module/test/test_zip.py +++ /dev/null @@ -1,31 +0,0 @@ -import autopath - -class AppTestZip: - - def test_zip_no_arguments(self): - raises(TypeError, zip) - - def test_one_list(self): - assert zip([1, 2, 3]) == [(1,), (2,), (3,)] - - def test_three_lists_same_size(self): - assert zip([1, 2, 3], [3, 4, 5], [6, 7, 8]) == ( - [(1, 3, 6), (2, 4, 7), (3, 5, 8)]) - - def test_three_lists_different_sizes(self): - assert zip([1, 2], [3, 4, 5, 6], [6, 7, 8]) == ( - [(1, 3, 6), (2, 4, 7)]) - - def test_tuples(self): - assert zip((1, 2, 3)) == [(1,), (2,), (3,)] - - def test_string(self): - assert zip('hello') == [('h',), ('e',), ('l',), ('l',), ('o',)] - - def test_strings(self): - assert zip('hello', 'bye') == ( - [('h', 'b'), ('e', 'y'), ('l', 'e')]) - - def test_mixed_types(self): - assert zip('hello', [1,2,3,4], (7,8,9,10)) == ( - [('h', 1, 7), ('e', 2, 8), ('l', 3, 9), ('l', 4, 10)]) diff --git a/pypy/objspace/__init__.py b/pypy/objspace/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/objspace/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py deleted file mode 100644 index d5e6aae436..0000000000 --- a/pypy/objspace/descroperation.py +++ /dev/null @@ -1,421 +0,0 @@ -import operator -from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import ObjSpace -from pypy.interpreter.function import Function -from pypy.interpreter.gateway import BuiltinCode -from pypy.interpreter.argument import Arguments - -class Object: - def descr__getattribute__(space, w_obj, w_name): - name = space.unwrap(w_name) - w_descr = space.lookup(w_obj, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - return space.get(w_descr, w_obj) - w_dict = space.getdict(w_obj) 
- if w_dict is not None: - try: - return space.getitem(w_dict, w_name) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - if w_descr is not None: - return space.get(w_descr, w_obj) - raise OperationError(space.w_AttributeError, w_name) - - def descr__setattr__(space, w_obj, w_name, w_value): - name = space.unwrap(w_name) - w_descr = space.lookup(w_obj, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - return space.set(w_descr, w_obj, w_value) - w_dict = space.getdict(w_obj) - if w_dict is not None: - return space.setitem(w_dict, w_name, w_value) - raise OperationError(space.w_AttributeError, w_name) - - def descr__delattr__(space, w_obj, w_name): - name = space.unwrap(w_name) - w_descr = space.lookup(w_obj, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - return space.delete(w_descr, w_obj) - w_dict = space.getdict(w_obj) - if w_dict is not None: - try: - return space.delitem(w_dict, w_name) - except OperationError, ex: - if not ex.match(space, space.w_KeyError): - raise - raise OperationError(space.w_AttributeError, w_name) - - def descr__init__(space, w_obj, __args__): - pass # XXX some strange checking maybe - -class DescrOperation: - - def getdict(space, w_obj): - return w_obj.getdict() - - def is_data_descr(space, w_obj): - return space.lookup(w_obj, '__set__') is not None - - def get_and_call_args(space, w_descr, w_obj, args): - descr = space.unwrap_builtin(w_descr) - # a special case for performance and to avoid infinite recursion - if isinstance(descr, Function): - return descr.call_args(args.prepend(w_obj)) - else: - w_impl = space.get(w_descr, w_obj) - return space.call_args(w_impl, args) - - def get_and_call_function(space, w_descr, w_obj, *args_w): - args = Arguments(space, list(args_w)) - return space.get_and_call_args(w_descr, w_obj, args) - - def unwrap_builtin(self, w_obj): - return w_obj # hook for hack by TrivialObjSpace - - def call_args(space, w_obj, args): - # a special case 
for performance - if type(w_obj) is Function: - return w_obj.call_args(args) - w_descr = space.lookup(w_obj, '__call__') - if w_descr is None: - raise OperationError( - space.w_TypeError, - space.mod(space.wrap('object %r is not callable'), - space.newtuple([w_obj]))) - return space.get_and_call_args(w_descr, w_obj, args) - - def get(space, w_descr, w_obj, w_type=None): - w_get = space.lookup(w_descr, '__get__') - if w_get is None: - return w_descr - if w_type is None: - w_type = space.type(w_obj) - return space.get_and_call_function(w_get, w_descr, w_obj, w_type) - - def set(space, w_descr, w_obj, w_val): - w_set = space.lookup(w_descr, '__set__') - if w_set is None: - raise OperationError(space.w_TypeError, - space.wrap("object is not a descriptor with set")) - return space.get_and_call_function(w_set, w_descr, w_obj, w_val) - - def delete(space, w_descr, w_obj): - w_delete = space.lookup(w_descr, '__delete__') - if w_delete is None: - raise OperationError(space.w_TypeError, - space.wrap("object is not a descriptor with delete")) - return space.get_and_call_function(w_delete, w_descr, w_obj) - - def getattr(space, w_obj, w_name): - w_descr = space.lookup(w_obj, '__getattribute__') - try: - return space.get_and_call_function(w_descr, w_obj, w_name) - except OperationError, e: - if not e.match(space, space.w_AttributeError): - raise - w_descr = space.lookup(w_obj, '__getattr__') - if w_descr is None: - raise - return space.get_and_call_function(w_descr, w_obj, w_name) - - def setattr(space, w_obj, w_name, w_val): - w_descr = space.lookup(w_obj, '__setattr__') - if w_descr is None: - raise OperationError(space.w_AttributeError, - space.wrap("object is readonly")) - return space.get_and_call_function(w_descr, w_obj, w_name, w_val) - - def delattr(space, w_obj, w_name): - w_descr = space.lookup(w_obj, '__delattr__') - if w_descr is None: - raise OperationError(space.w_AttributeError, - space.wrap("object does not support attribute removal")) - return 
space.get_and_call_function(w_descr, w_obj, w_name) - - def is_true(space, w_obj): - if w_obj == space.w_False: - return False - if w_obj == space.w_True: - return True - if w_obj == space.w_None: - return False - w_descr = space.lookup(w_obj, '__nonzero__') - if w_descr is not None: - w_res = space.get_and_call_function(w_descr, w_obj) - return space.is_true(w_res) - w_descr = space.lookup(w_obj, '__len__') - if w_descr is not None: - w_res = space.get_and_call_function(w_descr, w_obj) - return space.is_true(w_res) - return True - - def str(space, w_obj): - w_descr = space.lookup(w_obj, '__str__') - return space.get_and_call_function(w_descr, w_obj) - # XXX PyObject_Str() checks that the result is a string - - def repr(space, w_obj): - w_descr = space.lookup(w_obj, '__repr__') - return space.get_and_call_function(w_descr, w_obj) - # XXX PyObject_Repr() probably checks that the result is a string - - def iter(space, w_obj): - w_descr = space.lookup(w_obj, '__iter__') - if w_descr is None: - w_descr = space.lookup(w_obj, '__getitem__') - if w_descr is None: - raise OperationError(space.w_TypeError, - space.wrap("object is not iter()-able")) - return space.newseqiter(w_obj) - return space.get_and_call_function(w_descr, w_obj) - - def next(space, w_obj): - w_descr = space.lookup(w_obj, 'next') - if w_descr is None: - raise OperationError(space.w_TypeError, - space.wrap("iterator has no next() method")) - return space.get_and_call_function(w_descr, w_obj) - - def getitem(space, w_obj, w_key): - w_descr = space.lookup(w_obj, '__getitem__') - if w_descr is None: - raise OperationError(space.w_TypeError, - space.wrap("cannot get items from object")) - return space.get_and_call_function(w_descr, w_obj, w_key) - - def setitem(space, w_obj, w_key, w_val): - w_descr = space.lookup(w_obj, '__setitem__') - if w_descr is None: - raise OperationError(space.w_TypeError, - space.wrap("cannot set items on object")) - return space.get_and_call_function(w_descr, w_obj, w_key, w_val) - 
- def delitem(space, w_obj, w_key): - w_descr = space.lookup(w_obj, '__delitem__') - if w_descr is None: - raise OperationError(space.w_TypeError, - space.wrap("cannot delete items from object")) - return space.get_and_call_function(w_descr, w_obj, w_key) - - def pow(space, w_obj1, w_obj2, w_obj3): - w_typ1 = space.type(w_obj1) - w_typ2 = space.type(w_obj2) - w_left_impl = space.lookup(w_obj1, '__pow__') - if space.is_true(space.is_(w_typ1, w_typ2)): - w_right_impl = None - else: - w_right_impl = space.lookup(w_obj2, '__rpow__') - if space.is_true(space.issubtype(w_typ1, w_typ2)): - w_obj1, w_obj2 = w_obj2, w_obj1 - w_left_impl, w_right_impl = w_right_impl, w_left_impl - if w_left_impl is not None: - w_res = space.get_and_call_function(w_left_impl, w_obj1, w_obj2, - w_obj3) - if _check_notimplemented(space, w_res): - return w_res - if w_right_impl is not None: - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, - w_obj3) - if _check_notimplemented(space, w_res): - return w_res - - raise OperationError(space.w_TypeError, - space.wrap("operands do not support **")) - - def contains(space, w_container, w_item): - w_descr = space.lookup(w_container, '__contains__') - if w_descr is not None: - return space.get_and_call_function(w_descr, w_container, w_item) - w_iter = space.iter(w_container) - while 1: - try: - w_next = space.next(w_iter) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - return space.w_False - if space.is_true(space.eq(w_next, w_item)): - return space.w_True - - - # xxx round, ord - - - -# helpers - -def _check_notimplemented(space, w_obj): - return not space.is_true(space.is_(w_obj, space.w_NotImplemented)) - -def _invoke_binop(space, w_impl, w_obj1, w_obj2): - if w_impl is not None: - w_res = space.get_and_call_function(w_impl, w_obj1, w_obj2) - if _check_notimplemented(space, w_res): - return w_res - return None - -# helper for invoking __cmp__ - -def _conditional_neg(space, w_obj, flag): - if flag: 
- return space.neg(w_obj) - else: - return w_obj - -def _cmp(space, w_obj1, w_obj2): - w_typ1 = space.type(w_obj1) - w_typ2 = space.type(w_obj2) - w_left_impl = space.lookup(w_obj1, '__cmp__') - do_neg1 = False - do_neg2 = True - if space.is_true(space.is_(w_typ1, w_typ2)): - w_right_impl = None - else: - w_right_impl = space.lookup(w_obj2, '__cmp__') - if space.is_true(space.issubtype(w_typ1, w_typ2)): - w_obj1, w_obj2 = w_obj2, w_obj1 - w_left_impl, w_right_impl = w_right_impl, w_left_impl - do_neg1, do_neg2 = do_neg2, do_neg1 - - w_res = _invoke_binop(space, w_left_impl, w_obj1, w_obj2) - if w_res is not None: - return _conditional_neg(space, w_res, do_neg1) - w_res = _invoke_binop(space, w_right_impl, w_obj2, w_obj1) - if w_res is not None: - return _conditional_neg(space, w_res, do_neg2) - # fall back to internal rules - if space.is_true(space.is_(w_obj1, w_obj2)): - return space.wrap(0) - if space.is_true(space.is_(w_obj1, space.w_None)): - return space.wrap(-1) - if space.is_true(space.is_(w_obj2, space.w_None)): - return space.wrap(1) - if space.is_true(space.is_(w_typ1, w_typ2)): - w_id1 = space.id(w_obj1) - w_id2 = space.id(w_obj2) - else: - w_id1 = space.id(w_typ1) - w_id2 = space.id(w_typ2) - if space.is_true(space.lt(w_id1, w_id2)): - return space.wrap(-1) - else: - return space.wrap(1) - -# regular methods def helpers - -from pypy.tool.hack import func_with_new_name - -def _make_binop_impl(symbol, specialnames): - left, right = specialnames - def binop_impl(space, w_obj1, w_obj2): - w_typ1 = space.type(w_obj1) - w_typ2 = space.type(w_obj2) - w_left_impl = space.lookup(w_obj1, left) - if space.is_true(space.is_(w_typ1, w_typ2)): - w_right_impl = None - else: - w_right_impl = space.lookup(w_obj2, right) - if space.is_true(space.issubtype(w_typ1, w_typ2)): - w_obj1, w_obj2 = w_obj2, w_obj1 - w_left_impl, w_right_impl = w_right_impl, w_left_impl - - w_res = _invoke_binop(space, w_left_impl, w_obj1, w_obj2) - if w_res is not None: - return w_res - w_res = 
_invoke_binop(space, w_right_impl, w_obj2, w_obj1) - if w_res is not None: - return w_res - raise OperationError(space.w_TypeError, - space.wrap("unsupported operand type(s) for %s" % symbol)) - return func_with_new_name(binop_impl, "binop_%s_impl"%left.strip('_')) - -def _make_comparison_impl(symbol, specialnames): - left, right = specialnames - op = getattr(operator, left) - def comparison_impl(space, w_obj1, w_obj2): - w_typ1 = space.type(w_obj1) - w_typ2 = space.type(w_obj2) - w_left_impl = space.lookup(w_obj1, left) - w_first = w_obj1 - w_second = w_obj2 - - if space.is_true(space.is_(w_typ1, w_typ2)): - w_right_impl = None - else: - w_right_impl = space.lookup(w_obj2, right) - if space.is_true(space.issubtype(w_typ1, w_typ2)): - w_obj1, w_obj2 = w_obj2, w_obj1 - w_left_impl, w_right_impl = w_right_impl, w_left_impl - - w_res = _invoke_binop(space, w_left_impl, w_obj1, w_obj2) - if w_res is not None: - return w_res - w_res = _invoke_binop(space, w_right_impl, w_obj2, w_obj1) - if w_res is not None: - return w_res - # fallback: lt(a, b) <= lt(cmp(a, b), 0) ... 
- w_res = _cmp(space, w_first, w_second) - res = space.unwrap(w_res) - return space.wrap(op(res, 0)) - - return func_with_new_name(comparison_impl, 'comparison_%s_impl'%left.strip('_')) - - -def _make_inplace_impl(symbol, specialnames): - specialname, = specialnames - assert specialname.startswith('__i') and specialname.endswith('__') - noninplacespacemethod = specialname[3:-2] - if noninplacespacemethod in ['or', 'and']: - noninplacespacemethod += '_' # not too clean - def inplace_impl(space, w_lhs, w_rhs): - w_impl = space.lookup(w_lhs, specialname) - if w_impl is not None: - w_res = space.get_and_call_function(w_impl, w_lhs, w_rhs) - if _check_notimplemented(space, w_res): - return w_res - # XXX fix the error message we get here - return getattr(space, noninplacespacemethod)(w_lhs, w_rhs) - - return func_with_new_name(inplace_impl, 'inplace_%s_impl'%specialname.strip('_')) - -def _make_unaryop_impl(symbol, specialnames): - specialname, = specialnames - def unaryop_impl(space, w_obj): - w_impl = space.lookup(w_obj, specialname) - if w_impl is None: - raise OperationError(space.w_TypeError, - space.wrap("operand does not support unary %s" % symbol)) - return space.get_and_call_function(w_impl, w_obj) - return func_with_new_name(unaryop_impl, 'unaryop_%s_impl'%specialname.strip('_')) - - -# add regular methods - -for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: - if not hasattr(DescrOperation, _name): - _impl_maker = None - if _arity ==2 and _name in ['lt', 'le', 'gt', 'ge', 'ne', 'eq']: - #print "comparison", _specialnames - _impl_maker = _make_comparison_impl - elif _arity == 2 and _name.startswith('inplace_'): - #print "inplace", _specialnames - _impl_maker = _make_inplace_impl - elif _arity == 2 and len(_specialnames) == 2: - #print "binop", _specialnames - _impl_maker = _make_binop_impl - elif _arity == 1 and len(_specialnames) == 1: - #print "unaryop", _specialnames - _impl_maker = _make_unaryop_impl - if _impl_maker: - 
setattr(DescrOperation,_name,_impl_maker(_symbol,_specialnames)) - elif _name not in ['is_', 'id','type','issubtype', - # not really to be defined in DescrOperation - 'ord', 'round']: - raise Exception, "missing def for operation%s" % _name - - - diff --git a/pypy/objspace/flow/__init__.py b/pypy/objspace/flow/__init__.py deleted file mode 100644 index 3b9691083f..0000000000 --- a/pypy/objspace/flow/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from objspace import FlowObjSpace -Space = FlowObjSpace diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py deleted file mode 100644 index f2f01e0c06..0000000000 --- a/pypy/objspace/flow/flowcontext.py +++ /dev/null @@ -1,232 +0,0 @@ -from pypy.interpreter.executioncontext import ExecutionContext -from pypy.interpreter.pyframe import ExitFrame -from pypy.interpreter.error import OperationError -from pypy.objspace.flow.model import * -from pypy.objspace.flow.framestate import FrameState - - -class SpamBlock(Block): - dead = False - - def __init__(self, framestate): - Block.__init__(self, framestate.getvariables()) - self.framestate = framestate - - def patchframe(self, frame, executioncontext): - if self.dead: - raise ExitFrame(None) - self.framestate.restoreframe(frame) - executioncontext.crnt_block = self - executioncontext.crnt_ops = self.operations - - -class EggBlock(Block): - - def __init__(self, inputargs, prevblock, booloutcome): - Block.__init__(self, inputargs) - self.prevblock = prevblock - self.booloutcome = booloutcome - - def patchframe(self, frame, executioncontext): - parentblocks = [] - block = self - while isinstance(block, EggBlock): - block = block.prevblock - parentblocks.append(block) - # parentblocks = [Egg, Egg, ..., Egg, Spam] not including self - block.patchframe(frame, executioncontext) - replaylist = self.operations - prevblock = self - for block in parentblocks: - replaylist = ReplayList(block.operations, - prevblock, prevblock.booloutcome, - replaylist) - prevblock = 
block - executioncontext.crnt_ops = replaylist - -class ReplayList: - - def __init__(self, listtoreplay, nextblock, booloutcome, nextreplaylist): - self.listtoreplay = listtoreplay - self.nextblock = nextblock - self.booloutcome = booloutcome - self.nextreplaylist = nextreplaylist - self.index = 0 - - def append(self, operation): - operation.result = self.listtoreplay[self.index].result - assert operation == self.listtoreplay[self.index], ( - '\n'.join(["Not generating the same operation sequence:"] + - [str(s) for s in self.listtoreplay[:self.index]] + - [" ---> | while repeating we see here"] + - [" | %s" % operation] + - [str(s) for s in self.listtoreplay[self.index:]])) - self.index += 1 - - def finished(self): - return self.index == len(self.listtoreplay) - -class ConcreteNoOp: - # In "concrete mode", no SpaceOperations between Variables are allowed. - # Concrete mode is used to precompute lazily-initialized caches, - # when we don't want this precomputation to show up on the flow graph. 
- def append(self, operation): - raise AssertionError, "concrete mode: cannot perform %s" % operation - -class FlowExecutionContext(ExecutionContext): - - def __init__(self, space, code, globals, constargs={}, closure=None, - name=None): - ExecutionContext.__init__(self, space) - self.code = code - self.w_globals = w_globals = space.wrap(globals) - self.crnt_offset = -1 - if closure is None: - self.closure = None - else: - from pypy.interpreter.nestedscope import Cell - self.closure = [Cell(Constant(value)) for value in closure] - frame = self.create_frame() - formalargcount = code.getformalargcount() - arg_list = [Variable() for i in range(formalargcount)] - for position, value in constargs.items(): - arg_list[position] = Constant(value) - frame.setfastscope(arg_list) - self.joinpoints = {} - for joinpoint in code.getjoinpoints(): - self.joinpoints[joinpoint] = [] # list of blocks - initialblock = SpamBlock(FrameState(frame).copy()) - self.pendingblocks = [initialblock] - self.graph = FunctionGraph(name or code.co_name, initialblock) - - def create_frame(self): - # create an empty frame suitable for the code object - # while ignoring any operation like the creation of the locals dict - self.crnt_ops = [] - return self.code.create_frame(self.space, self.w_globals, - self.closure) - - def bytecode_trace(self, frame): - if not isinstance(self.crnt_ops, list): - return - next_instr = frame.next_instr - self.crnt_offset = next_instr # save offset for opcode - varnames = frame.code.getvarnames() - for name, w_value in zip(varnames, frame.getfastscope()): - if isinstance(w_value, Variable): - w_value.rename(name) - if next_instr in self.joinpoints: - currentstate = FrameState(frame) - # can 'currentstate' be merged with one of the blocks that - # already exist for this bytecode position? 
- for block in self.joinpoints[next_instr]: - newstate = block.framestate.union(currentstate) - if newstate is not None: - # yes - finished = newstate == block.framestate - break - else: - # no - newstate = currentstate.copy() - finished = False - block = None - - if finished: - newblock = block - else: - newblock = SpamBlock(newstate) - # unconditionally link the current block to the newblock - outputargs = currentstate.getoutputargs(newstate) - self.crnt_block.closeblock(Link(outputargs, newblock)) - # phew - if finished: - raise ExitFrame(None) - if block is not None and block.exits: - # to simplify the graph, we patch the old block to point - # directly at the new block which is its generalization - block.dead = True - block.operations = () - block.exitswitch = None - outputargs = block.framestate.getoutputargs(newstate) - block.recloseblock(Link(outputargs, newblock)) - newblock.patchframe(frame, self) - self.joinpoints[next_instr].insert(0, newblock) - - def guessbool(self, w_condition, cases=[False,True], - replace_last_variable_except_in_first_case = None): - if isinstance(self.crnt_ops, list): - block = self.crnt_block - vars = vars2 = block.getvariables() - links = [] - for case in cases: - egg = EggBlock(vars2, block, case) - self.pendingblocks.append(egg) - link = Link(vars, egg, case) - links.append(link) - if replace_last_variable_except_in_first_case is not None: - assert block.operations[-1].result is vars[-1] - vars = vars[:-1] - vars.extend(replace_last_variable_except_in_first_case) - vars2 = vars2[:-1] - while len(vars2) < len(vars): - vars2.append(Variable()) - replace_last_variable_except_in_first_case = None - block.exitswitch = w_condition - block.closeblock(*links) - # forked the graph. Note that False comes before True by default - # in the exits tuple so that (just in case we need it) we - # actually have block.exits[False] = elseLink and - # block.exits[True] = ifLink. 
- raise ExitFrame(None) - if isinstance(self.crnt_ops, ReplayList): - replaylist = self.crnt_ops - assert replaylist.finished() - self.crnt_block = replaylist.nextblock - self.crnt_ops = replaylist.nextreplaylist - return replaylist.booloutcome - raise AssertionError, "concrete mode: cannot guessbool(%s)" % ( - w_condition,) - - def guessexception(self, *classes): - outcome = self.guessbool(Constant(last_exception), - cases = [None] + list(classes), - replace_last_variable_except_in_first_case = [ - Constant(last_exception), # exc. class - Constant(last_exc_value)]) # exc. value - if outcome is None: - w_exc_cls, w_exc_value = None, None - else: - w_exc_cls, w_exc_value = self.crnt_block.inputargs[-2:] - return outcome, w_exc_cls, w_exc_value - - def build_flow(self): - from pypy.objspace.flow.objspace import UnwrapException - while self.pendingblocks: - block = self.pendingblocks.pop(0) - frame = self.create_frame() - try: - block.patchframe(frame, self) - except ExitFrame: - continue # restarting a dead SpamBlock - try: - w_result = frame.resume() - except OperationError, e: - link = Link([e.w_type, e.w_value], self.graph.exceptblock) - self.crnt_block.closeblock(link) - else: - if w_result is not None: - link = Link([w_result], self.graph.returnblock) - self.crnt_block.closeblock(link) - self.fixeggblocks() - - def fixeggblocks(self): - # EggBlocks reuse the variables of their previous block, - # which is deemed not acceptable for simplicity of the operations - # that will be performed later on the flow graph. 
- def fixegg(node): - if isinstance(node, EggBlock): - mapping = {} - for a in node.inputargs: - mapping[a] = Variable(a) - node.renamevariables(mapping) - traverse(fixegg, self.graph) diff --git a/pypy/objspace/flow/framestate.py b/pypy/objspace/flow/framestate.py deleted file mode 100644 index d194fb953d..0000000000 --- a/pypy/objspace/flow/framestate.py +++ /dev/null @@ -1,172 +0,0 @@ -from pypy.interpreter.pyframe import PyFrame, ControlFlowException -from pypy.interpreter.error import OperationError -from pypy.objspace.flow.model import * - -class FrameState: - # XXX this class depends on the internal state of PyFrame objects - - def __init__(self, state): - if isinstance(state, PyFrame): - data = state.getfastscope() + state.valuestack.items - if state.last_exception is None: - data.append(Constant(None)) - data.append(Constant(None)) - else: - data.append(state.last_exception.w_type) - data.append(state.last_exception.w_value) - recursively_flatten(state.space, data) - self.mergeable = data - self.nonmergeable = ( - state.blockstack.items[:], - state.next_instr, - state.w_locals, - ) - elif isinstance(state, tuple): - self.mergeable, self.nonmergeable = state - else: - raise TypeError("can't get framestate for %r" % - state.__class__.__name__) - for w1 in self.mergeable: - assert isinstance(w1, (Variable, Constant)), ( - '%r found in frame state' % w1) - - def restoreframe(self, frame): - if isinstance(frame, PyFrame): - fastlocals = len(frame.fastlocals_w) - data = self.mergeable[:] - recursively_unflatten(frame.space, data) - frame.setfastscope(data[:fastlocals]) - frame.valuestack.items[:] = data[fastlocals:-2] - if data[-2] == Constant(None): - assert data[-1] == Constant(None) - frame.last_exception = None - else: - frame.last_exception = OperationError(data[-2], data[-1]) - ( - frame.blockstack.items[:], - frame.next_instr, - frame.w_locals, - ) = self.nonmergeable - else: - raise TypeError("can't set framestate for %r" % - frame.__class__.__name__) - 
- def copy(self): - "Make a copy of this state in which all Variables are fresh." - newstate = [] - for w in self.mergeable: - if isinstance(w, Variable): - w = Variable() - newstate.append(w) - return FrameState((newstate, self.nonmergeable)) - - def getvariables(self): - return [w for w in self.mergeable if isinstance(w, Variable)] - - def __eq__(self, other): - """Two states are equal - if they only use different Variables at the same place""" - # safety check, don't try to compare states with different - # nonmergeable states - assert isinstance(other, FrameState) - assert len(self.mergeable) == len(other.mergeable) - assert self.nonmergeable == other.nonmergeable - for w1, w2 in zip(self.mergeable, other.mergeable): - if not (w1 == w2 or (isinstance(w1, Variable) and - isinstance(w2, Variable))): - return False - return True - - def __ne__(self, other): - return not (self == other) - - def union(self, other): - """Compute a state that is at least as general as both self and other. - A state 'a' is more general than a state 'b' if all Variables in 'b' - are also Variables in 'a', but 'a' may have more Variables. - """ - newstate = [] - try: - for w1, w2 in zip(self.mergeable, other.mergeable): - newstate.append(union(w1, w2)) - except UnionError: - return None - return FrameState((newstate, self.nonmergeable)) - - def getoutputargs(self, targetstate): - "Return the output arguments needed to link self to targetstate." - result = [] - for w_output, w_target in zip(self.mergeable, targetstate.mergeable): - if isinstance(w_target, Variable): - result.append(w_output) - return result - - -class UnionError(Exception): - "The two states should be merged." - -def union(w1, w2): - "Union of two variables or constants." - if isinstance(w1, Variable) or isinstance(w2, Variable): - return Variable() # new fresh Variable - if isinstance(w1, Constant) and isinstance(w2, Constant): - if w1 == w2: - return w1 - # ControlFlowException represent stack unrollers in the stack. 
- # They should not be merged because they will be unwrapped. - # This is needed for try:except: and try:finally:, though - # it makes the control flow a bit larger by duplicating the - # handlers. - dont_merge_w1 = w1 in UNPICKLE_TAGS - dont_merge_w2 = w2 in UNPICKLE_TAGS - if dont_merge_w1 or dont_merge_w2: - raise UnionError - else: - return Variable() # generalize different constants - raise TypeError('union of %r and %r' % (w1.__class__.__name__, - w2.__class__.__name__)) - -# ____________________________________________________________ -# -# We have to flatten out the state of the frame into a list of -# Variables and Constants. This is done above by collecting the -# locals and the items on the value stack, but the latter may contain -# ControlFlowExceptions. We have to handle these specially, because -# some of them hide references to more Variables and Constants. -# The trick is to flatten ("pickle") them into the list so that the -# extra Variables show up directly in the list too. 
- -class PickleTag: - pass - -PICKLE_TAGS = {} -UNPICKLE_TAGS = {} - -def recursively_flatten(space, lst): - i = 0 - while i < len(lst): - item = lst[i] - if not (isinstance(item, Constant) and - isinstance(item.value, ControlFlowException)): - i += 1 - else: - unroller = item.value - vars = unroller.state_unpack_variables(space) - key = unroller.__class__, len(vars) - try: - tag = PICKLE_TAGS[key] - except: - tag = PICKLE_TAGS[key] = Constant(PickleTag()) - UNPICKLE_TAGS[tag] = key - lst[i:i+1] = [tag] + vars - -def recursively_unflatten(space, lst): - for i in range(len(lst)-1, -1, -1): - item = lst[i] - if item in UNPICKLE_TAGS: - unrollerclass, argcount = UNPICKLE_TAGS[item] - arguments = lst[i+1: i+1+argcount] - del lst[i+1: i+1+argcount] - unroller = unrollerclass() - unroller.state_pack_variables(space, *arguments) - lst[i] = Constant(unroller) diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py deleted file mode 100644 index 721b7ae9dc..0000000000 --- a/pypy/objspace/flow/model.py +++ /dev/null @@ -1,364 +0,0 @@ -# The model produced by the flowobjspace -# this is to be used by the translator mainly. 
-# -# the below object/attribute model evolved from -# a discussion in Berlin, 4th of october 2003 -from __future__ import generators - -class FunctionGraph: - def __init__(self, name, startblock, return_var=None): - self.name = name # function name (possibly mangled already) - self.startblock = startblock - self.startblock.isstartblock = True - # build default returnblock - self.returnblock = Block([return_var or Variable()]) - self.returnblock.operations = () - self.returnblock.exits = () - # block corresponding to exception results - self.exceptblock = Block([Variable(), # exception class - Variable()]) # exception value - self.exceptblock.operations = () - self.exceptblock.exits = () - - def getargs(self): - return self.startblock.inputargs - - def getreturnvar(self): - return self.returnblock.inputargs[0] - - def hasonlyexceptionreturns(self): - try: - return self._onlyex - except AttributeError: - def visit(link): - if isinstance(link, Link): - if link.target == self.returnblock: - raise ValueError(link) - try: - traverse(visit, self) - except ValueError: - self._onlyex = False - else: - self._onlyex = True - return self._onlyex - - def show(self): - from pypy.translator.tool.graphpage import SingleGraphPage - SingleGraphPage(self).display() - -class Link: - def __init__(self, args, target, exitcase=None): - assert len(args) == len(target.inputargs), "output args mismatch" - self.args = list(args) # mixed list of var/const - self.target = target # block - self.exitcase = exitcase # this is a concrete value - self.prevblock = None # the block this Link is an exit of - - def __repr__(self): - return "link from %s to %s" % (str(self.prevblock), str(self.target)) - -class Block: - isstartblock = False - - def __init__(self, inputargs): - self.inputargs = list(inputargs) # mixed list of variable/const - self.operations = [] # list of SpaceOperation(s) - self.exitswitch = None # a variable or - # Constant(last_exception), see below - self.exits = [] # list of 
Link(s) - - def __str__(self): - if self.operations: - txt = "block@%d" % self.operations[0].offset - else: - txt = "codeless block" - return txt - - def __repr__(self): - txt = "%s with %d exits" % (str(self), len(self.exits)) - if self.exitswitch: - txt = "%s(%s)" % (txt, self.exitswitch) - return txt - - def getvariables(self): - "Return all variables mentioned in this Block." - result = self.inputargs[:] - for op in self.operations: - result += op.args - result.append(op.result) - return uniqueitems([w for w in result if isinstance(w, Variable)]) - - def getconstants(self): - "Return all constants mentioned in this Block." - result = self.inputargs[:] - for op in self.operations: - result += op.args - return uniqueitems([w for w in result if isinstance(w, Constant)]) - - def renamevariables(self, mapping): - for a in mapping: - assert isinstance(a, Variable), a - self.inputargs = [mapping.get(a, a) for a in self.inputargs] - for op in self.operations: - op.args = [mapping.get(a, a) for a in op.args] - op.result = mapping.get(op.result, op.result) - for link in self.exits: - link.args = [mapping.get(a, a) for a in link.args] - - def closeblock(self, *exits): - assert self.exits == [], "block already closed" - self.recloseblock(*exits) - - def recloseblock(self, *exits): - for exit in exits: - exit.prevblock = self - self.exits = exits - - -class Variable: - counter = 0 - instances = {} - renamed = False - - def __init__(self, name=None): - self.name = 'v%d' % Variable.counter - Variable.instances[self.name] = self - Variable.counter += 1 - if name is not None: - self.rename(name) - - def __repr__(self): - return '%s' % self.name - - def rename(self, name): - if self.renamed: - return - if isinstance(name, Variable): - if not name.renamed: - return - name = name.name[:name.name.rfind('_')] - del Variable.instances[self.name] - self.renamed = True - self.name = name + '_' + self.name[1:] - Variable.instances[self.name] = self - - -class Constant: - def 
__init__(self, value): - self.value = value # a concrete value - # try to be smart about constant mutable or immutable values - key = type(self.value), self.value # to avoid confusing e.g. 0 and 0.0 - try: - hash(key) - except TypeError: - key = id(self.value) - self.key = key - - def __eq__(self, other): - return self.__class__ is other.__class__ and self.key == other.key - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash(self.key) - - def __repr__(self): - # try to limit the size of the repr to make it more readable - r = repr(self.value) - if (r.startswith('<') and r.endswith('>') and - hasattr(self.value, '__name__')): - r = '%s %s' % (type(self.value).__name__, self.value.__name__) - elif len(r) > 60 or (len(r) > 30 and type(self.value) is not str): - r = r[:20] + '...' + r[-8:] - return '(%s)' % r - -# hack! it is useful to have UNDEFINED be an instance of Constant too. -# PyFrame then automatically uses this Constant as a marker for -# non-initialized variables. 
-from pypy.interpreter.eval import UNDEFINED -UndefinedConstant = UNDEFINED.__class__ -UndefinedConstant.__bases__ += (Constant,) -Constant.__init__(UNDEFINED, None) - -class SpaceOperation: - def __init__(self, opname, args, result): - self.opname = opname # operation name - self.args = list(args) # mixed list of var/const - self.result = result # either Variable or Constant instance - self.offset = -1 # offset in code string, to be added later - - def __eq__(self, other): - return (self.__class__ is other.__class__ and - self.opname == other.opname and - self.args == other.args and - self.result == other.result) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash((self.opname,tuple(self.args),self.result)) - - def __repr__(self): - return "%r = %s(%s)" % (self.result, self.opname, ", ".join(map(repr, self.args))) - -class Atom: - "NOT_RPYTHON" - def __init__(self, name): - self.name = name - def __repr__(self): - return self.name -last_exception = Atom('last_exception') -last_exc_value = Atom('last_exc_value') -# if Block().exitswitch == Constant(last_exception), it means that we are -# interested in catching the exception that the *last operation* of the -# block could raise. The exitcases of the links are None for no exception -# or XxxError classes to catch the matching exceptions. - -def uniqueitems(lst): - "Returns a list with duplicate elements removed." - result = [] - seen = {} - for item in lst: - if item not in seen: - result.append(item) - seen[item] = True - return result - - -#_________________________________________________________ -# a visitor for easy traversal of the above model - -import inspect # for getmro - -class traverse: - edgedef = { - FunctionGraph : ('startblock',), - Block : ('exits',), - Link : ('target',), - } - - def __init__(self, visitor, functiongraph): - """ send the visitor over all (reachable) nodes. 
- the visitor needs to have either callable attributes 'visit_typename' - or otherwise is callable itself. - """ - self.visitor = visitor - self.seen = {} - self.visit(functiongraph) - - def visit(self, node): - if id(node) in self.seen: - return - - # do the visit - cls = node.__class__ - for subclass in inspect.getmro(cls): - consume = getattr(self.visitor, "visit_" + subclass.__name__, None) - if consume: - break - else: - consume = getattr(self.visitor, 'visit', self.visitor) - - assert callable(consume), "visitor not found for %r on %r" % (cls, self.visitor) - self.seen[id(node)] = consume(node) - - # recurse - for dispclass, attrs in self.edgedef.items(): - for subclass in inspect.getmro(cls): - if subclass == dispclass: - for attr in attrs: - for obj in flattenobj(getattr(node, attr)): - self.visit(obj) - return - - raise ValueError, "could not dispatch %r" % cls - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - -def flattenobj(*args): - for arg in args: - try: - for atom in flattenobj(*arg): - yield atom - except: yield arg - -def mkentrymap(funcgraph): - "Returns a dict mapping Blocks to lists of Links." - startlink = Link(funcgraph.getargs(), funcgraph.startblock) - result = {funcgraph.startblock: [startlink]} - def visit(link): - if isinstance(link, Link): - lst = result.setdefault(link.target, []) - lst.append(link) - traverse(visit, funcgraph) - return result - -def checkgraph(graph): - "Check the consistency of a flow graph." 
- if __debug__: - this_block = [None] - exitblocks = {graph.returnblock: 1, # retval - graph.exceptblock: 2} # exc_cls, exc_value - - def visit(block): - if isinstance(block, Block): - this_block[0] = block - assert bool(block.isstartblock) == (block is graph.startblock) - if not block.exits: - assert block in exitblocks - vars = {} - resultvars = [op.result for op in block.operations] - for v in block.inputargs + resultvars: - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = True - for op in block.operations: - for v in op.args: - assert isinstance(v, (Constant, Variable)) - if isinstance(v, Variable): - assert v in vars - if block.exitswitch is None: - assert len(block.exits) <= 1 - if block.exits: - assert block.exits[0].exitcase is None - elif block.exitswitch == Constant(last_exception): - assert len(block.operations) >= 1 - assert len(block.exits) >= 1 - assert block.exits[0].exitcase is None - for link in block.exits[1:]: - assert issubclass(link.exitcase, Exception) - else: - assert isinstance(block.exitswitch, Variable) - assert block.exitswitch in vars - for link in block.exits: - assert len(link.args) == len(link.target.inputargs) - assert link.prevblock is block - for v in link.args: - assert isinstance(v, (Constant, Variable)) - if isinstance(v, Variable): - assert v in vars - vars_previous_blocks.update(vars) - - try: - for block, nbargs in exitblocks.items(): - this_block[0] = block - assert len(block.inputargs) == nbargs - assert not block.operations - assert not block.exits - - vars_previous_blocks = {} - - traverse(visit, graph) - - except AssertionError, e: - # hack for debug tools only - if this_block[0] and not hasattr(e, '__annotator_block'): - setattr(e, '__annotator_block', this_block[0]) - raise diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py deleted file mode 100644 
index ff6a72987d..0000000000 --- a/pypy/objspace/flow/objspace.py +++ /dev/null @@ -1,352 +0,0 @@ -# ______________________________________________________________________ -import sys, operator, types -from pypy.interpreter.baseobjspace import ObjSpace -from pypy.interpreter.pycode import PyCode -from pypy.interpreter.error import OperationError -from pypy.objspace.flow.model import * -from pypy.objspace.flow import flowcontext - -debug = 0 - -class UnwrapException(Exception): - "Attempted to unwrap a Variable." - -# ______________________________________________________________________ -class FlowObjSpace(ObjSpace): - """NOT_RPYTHON. - The flow objspace space is used to produce a flow graph by recording - the space operations that the interpreter generates when it interprets - (the bytecode of) some function. - """ - - full_exceptions = False - - resolve_constants = True # used by the appflowspace - - def initialize(self): - import __builtin__ - self.concrete_mode = 0 - self.w_builtins = Constant(__builtin__.__dict__) - self.w_None = Constant(None) - self.w_False = Constant(False) - self.w_True = Constant(True) - for exc in [KeyError, ValueError, IndexError, StopIteration, - AssertionError]: - clsname = exc.__name__ - setattr(self, 'w_'+clsname, Constant(exc)) - # the following exceptions are the ones that should not show up - # during flow graph construction; they are triggered by - # non-R-Pythonic constructs or real bugs like typos. - for exc in [NameError, UnboundLocalError]: - clsname = exc.__name__ - setattr(self, 'w_'+clsname, None) - self.specialcases = {} - #self.make_builtins() - #self.make_sys() - - def loadfromcache(self, key, builder, cache): - # when populating the caches, the flow space switches to - # "concrete mode". In this mode, only Constants are allowed - # and no SpaceOperation is recorded. 
- def my_builder(key, stuff): - previous_ops = self.executioncontext.crnt_ops - self.executioncontext.crnt_ops = flowcontext.ConcreteNoOp() - self.concrete_mode += 1 - try: - return builder(key, stuff) - finally: - self.executioncontext.crnt_ops = previous_ops - self.concrete_mode -= 1 - return super(FlowObjSpace, self).loadfromcache(key, my_builder, cache) - - def newdict(self, items_w): - if self.concrete_mode: - content = [(self.unwrap(w_key), self.unwrap(w_value)) - for w_key, w_value in items_w] - return Constant(dict(content)) - flatlist_w = [] - for w_key, w_value in items_w: - flatlist_w.append(w_key) - flatlist_w.append(w_value) - return self.do_operation('newdict', *flatlist_w) - - def newtuple(self, args_w): - try: - content = [self.unwrap(w_arg) for w_arg in args_w] - except UnwrapException: - return self.do_operation('newtuple', *args_w) - else: - return Constant(tuple(content)) - - def newlist(self, args_w): - if self.concrete_mode: - content = [self.unwrap(w_arg) for w_arg in args_w] - return Constant(content) - return self.do_operation('newlist', *args_w) - - def newslice(self, w_start=None, w_stop=None, w_step=None): - if w_start is None: w_start = self.w_None - if w_stop is None: w_stop = self.w_None - if w_step is None: w_step = self.w_None - if self.concrete_mode: - return Constant(slice(self.unwrap(w_start), - self.unwrap(w_stop), - self.unwrap(w_step))) - return self.do_operation('newslice', w_start, w_stop, w_step) - - def wrap(self, obj): - if isinstance(obj, (Variable, Constant)) and obj is not UNDEFINED: - raise TypeError("already wrapped: " + repr(obj)) - return Constant(obj) - - def unwrap(self, w_obj): - if isinstance(w_obj, Variable): - raise UnwrapException - elif isinstance(w_obj, Constant): - return w_obj.value - else: - raise TypeError("not wrapped: " + repr(w_obj)) - - def getexecutioncontext(self): - return self.executioncontext - - def reraise(self): - etype, evalue, etb = sys.exc_info() - #print >> sys.stderr, '*** reraise', 
etype, evalue - raise OperationError, OperationError(self.wrap(etype), self.wrap(evalue)), etb - - def setup_executioncontext(self, ec): - self.executioncontext = ec - from pypy.objspace.flow import specialcase - specialcase.setup(self) - - def build_flow(self, func, constargs={}): - """ - """ - if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): - raise Exception, "%r is tagged as NOT_RPYTHON" % (func,) - code = func.func_code - code = PyCode()._from_code(code) - if func.func_closure is None: - closure = None - else: - closure = [extract_cell_content(c) for c in func.func_closure] - # CallableFactory.pycall may add class_ to functions that are methods - name = func.func_name - class_ = getattr(func, 'class_', None) - if class_ is not None: - name = '%s.%s' % (class_.__name__, name) - for c in "<>&!": - name = name.replace(c, '_') - ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, closure, name) - self.setup_executioncontext(ec) - ec.build_flow() - checkgraph(ec.graph) - return ec.graph - - def unpacktuple(self, w_tuple, expected_length=None): - # special case to accept either Constant tuples - # or real tuples of Variables/Constants - if isinstance(w_tuple, tuple): - result = w_tuple - else: - unwrapped = self.unwrap(w_tuple) - result = tuple([Constant(x) for x in unwrapped]) - if expected_length is not None and len(result) != expected_length: - raise ValueError, "got a tuple of length %d instead of %d" % ( - len(result), expected_length) - return result - - def unpackiterable(self, w_iterable, expected_length=None): - if isinstance(w_iterable, Variable) and expected_length is None: - raise UnwrapException, ("cannot unpack a Variable iterable" - "without knowing its length") -## # XXX TEMPORARY HACK XXX TEMPORARY HACK XXX TEMPORARY HACK -## print ("*** cannot unpack a Variable iterable " -## "without knowing its length,") -## print " assuming a list or tuple with up to 7 items" -## items = [] -## w_len = 
self.len(w_iterable) -## i = 0 -## while True: -## w_i = self.wrap(i) -## w_cond = self.eq(w_len, w_i) -## if self.is_true(w_cond): -## break # done -## if i == 7: -## # too many values -## raise OperationError(self.w_AssertionError, self.w_None) -## w_item = self.do_operation('getitem', w_iterable, w_i) -## items.append(w_item) -## i += 1 -## return items -## # XXX TEMPORARY HACK XXX TEMPORARY HACK XXX TEMPORARY HACK - elif expected_length is not None: - w_len = self.len(w_iterable) - w_correct = self.eq(w_len, self.wrap(expected_length)) - if not self.is_true(w_correct): - raise OperationError(self.w_ValueError, self.w_None) - return [self.do_operation('getitem', w_iterable, self.wrap(i)) - for i in range(expected_length)] - return ObjSpace.unpackiterable(self, w_iterable, expected_length) - - # ____________________________________________________________ - def do_operation(self, name, *args_w): - spaceop = SpaceOperation(name, args_w, Variable()) - if hasattr(self, 'executioncontext'): # not here during bootstrapping - spaceop.offset = self.executioncontext.crnt_offset - self.executioncontext.crnt_ops.append(spaceop) - return spaceop.result - - def is_true(self, w_obj): - try: - obj = self.unwrap(w_obj) - except UnwrapException: - pass - else: - return bool(obj) - w_truthvalue = self.do_operation('is_true', w_obj) - context = self.getexecutioncontext() - return context.guessbool(w_truthvalue) - - def next(self, w_iter): - w_item = self.do_operation("next", w_iter) - context = self.getexecutioncontext() - outcome, w_exc_cls, w_exc_value = context.guessexception(StopIteration) - if outcome is StopIteration: - raise OperationError(self.w_StopIteration, self.w_None) - else: - return w_item - - def call_args(self, w_callable, args): - try: - fn = self.unwrap(w_callable) - sc = self.specialcases[fn] # TypeError if 'fn' not hashable - except (UnwrapException, KeyError, TypeError): - pass - else: - return sc(self, fn, args) - - try: - args_w, kwds_w = args.unpack() - 
except UnwrapException: - args_w, kwds_w = '?', '?' - if not kwds_w: - # simple case - w_res = self.do_operation('simple_call', w_callable, *args_w) - else: - # general case - shape, args_w = args.flatten() - w_res = self.do_operation('call_args', w_callable, Constant(shape), - *args_w) - - # maybe the call has generated an exception (any one) - # but, let's say, not if we are calling a built-in class or function - # because this gets in the way of the special-casing of - # - # raise SomeError(x) - # - # as shown by test_objspace.test_raise3. - - exceptions = [Exception] # *any* exception by default - if isinstance(w_callable, Constant): - c = w_callable.value - if isinstance(c, (types.BuiltinFunctionType, - types.BuiltinMethodType)): - exceptions = None - elif (isinstance(c, (type, types.ClassType)) and - c.__module__ in ['__builtin__', 'exceptions']): - exceptions = None - self.handle_implicit_exceptions(exceptions) - return w_res - - def handle_implicit_exceptions(self, exceptions): - if exceptions: - # catch possible exceptions implicitly. If the OperationError - # below is not caught in the same function, it will produce an - # exception-raising return block in the flow graph. The special - # value 'wrap(last_exception)' is used as a marker for this kind - # of implicit exceptions, and simplify.py will remove it as per - # the RPython definition: implicit exceptions not explicitly - # caught in the same function are assumed not to occur. - context = self.getexecutioncontext() - outcome, w_exc_cls, w_exc_value = context.guessexception(*exceptions) - if outcome is not None: - # we assume that the caught exc_cls will be exactly the - # one specified by 'outcome', and not a subclass of it, - # unless 'outcome' is Exception. 
- if outcome is not Exception: - w_exc_cls = Constant(outcome) - raise OperationError(w_exc_cls, w_exc_value) - -# ______________________________________________________________________ - -implicit_exceptions = { - 'getitem': [IndexError, KeyError], - } - -def extract_cell_content(c): - """Get the value contained in a CPython 'cell', as read through - the func_closure of a function object.""" - # yuk! this is all I could come up with that works in Python 2.2 too - class X(object): - def __eq__(self, other): - self.other = other - x = X() - x_cell, = (lambda: x).func_closure - x_cell == c - return x.other - -def make_op(name, symbol, arity, specialnames): - if hasattr(FlowObjSpace, name): - return # Shouldn't do it - - op = getattr(operator, name, None) - if not op: - #if name == 'call': - # op = apply - if name == 'issubtype': - op = issubclass - elif name == 'is_': - op = lambda x, y: x is y - elif name == 'getattr': - op = getattr - else: - if debug: print >> sys.stderr, "XXX missing operator:", name - - exceptions = implicit_exceptions.get(name) - - def generic_operator(self, *args_w): - assert len(args_w) == arity, name+" got the wrong number of arguments" - if op and (self.resolve_constants or self.concrete_mode): - args = [] - for w_arg in args_w: - try: - arg = self.unwrap(w_arg) - except UnwrapException: - break - else: - args.append(arg) - else: - # All arguments are constants: call the operator now - #print >> sys.stderr, 'Constant operation', op - try: - result = op(*args) - except: - self.reraise() - else: - return self.wrap(result) - - #print >> sys.stderr, 'Variable operation', name, args_w - w_result = self.do_operation(name, *args_w) - self.handle_implicit_exceptions(exceptions) - return w_result - - setattr(FlowObjSpace, name, generic_operator) - -for line in ObjSpace.MethodTable: - make_op(*line) - -# ______________________________________________________________________ -# End of objspace.py diff --git a/pypy/objspace/flow/specialcase.py 
b/pypy/objspace/flow/specialcase.py deleted file mode 100644 index b7abeda603..0000000000 --- a/pypy/objspace/flow/specialcase.py +++ /dev/null @@ -1,90 +0,0 @@ -import types, operator -from pypy.interpreter import pyframe, baseobjspace -from pypy.interpreter.error import OperationError -from pypy.objspace.flow.objspace import UnwrapException -from pypy.objspace.flow.model import Constant - - -def getconstclass(space, w_cls): - try: - ecls = space.unwrap(w_cls) - except UnwrapException: - pass - else: - if isinstance(ecls, (type, types.ClassType)): - return ecls - return None - - -def sc_normalize_exception(space, fn, args): - """Special-case for 'raise' statements. Case-by-case analysis: - - * raise Class - - with a constant Class, it is easy to recognize. - But we don't normalize: the associated value is None. - - * raise Class(...) - - when the class is instantiated in-place, we can figure that out - - * raise Instance - - assumes that it's not a class, and raises an exception whose class - is variable and whose value is Instance. - - * raise Class, Arg - - assumes that Arg is the value you want for the exception, and - that Class is exactly the exception class. No check or normalization. - """ - w_arg1, w_arg2, w_tb = args.fixedunpack(3) - - # w_arg3 (the traceback) is ignored and replaced with None - # if it is a Variable, because pyopcode.py tries to unwrap it. - # It means that we ignore the 'tb' argument of 'raise' in most cases. - if not isinstance(w_tb, Constant): - w_tb = space.w_None - - if w_arg2 != space.w_None: - # raise Class, Arg: no normalization - return (w_arg1, w_arg2, w_tb) - - etype = getconstclass(space, w_arg1) - if etype is not None: - # raise Class - return (w_arg1, space.w_None, w_tb) - - # raise Class(..)? We need a hack to figure out of which class it is. - # Normally, Instance should have been created by the previous operation - # which should be a simple_call(<Class>, ...). - # Fetch the <Class> out of there. 
(This doesn't work while replaying) - if space.executioncontext.crnt_ops: - spaceop = space.executioncontext.crnt_ops[-1] - if (spaceop.opname == 'simple_call' and - spaceop.result is w_arg1): - w_type = spaceop.args[0] - return (w_type, w_arg1, w_tb) - - # raise Instance. Fall-back. - w_type = space.do_operation('type', w_arg1) - return (w_type, w_arg1, w_tb) - # this function returns a real tuple that can be handled - # by FlowObjSpace.unpacktuple() - -def sc_import(space, fn, args): - w_name, w_glob, w_loc, w_frm = args.fixedunpack(4) - return space.wrap(__import__(space.unwrap(w_name), - space.unwrap(w_glob), - space.unwrap(w_loc), - space.unwrap(w_frm))) - -def sc_operator(space, fn, args): - # XXX do this more cleanly - args_w, kwds_w = args.unpack() - assert kwds_w == {} - opname = fn.__name__.replace('__', '') - return space.do_operation(opname, *args_w) - -def setup(space): - fn = pyframe.normalize_exception.get_function(space) - space.specialcases[fn] = sc_normalize_exception - space.specialcases[__import__] = sc_import - for opname in ['lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_']: - space.specialcases[getattr(operator, opname)] = sc_operator diff --git a/pypy/objspace/flow/test/__init__.py b/pypy/objspace/flow/test/__init__.py deleted file mode 100644 index b7db25411d..0000000000 --- a/pypy/objspace/flow/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Empty diff --git a/pypy/objspace/flow/test/autopath.py b/pypy/objspace/flow/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/objspace/flow/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. 
- -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/objspace/flow/test/test_framestate.py b/pypy/objspace/flow/test/test_framestate.py deleted file mode 100644 index d492ef8f34..0000000000 --- a/pypy/objspace/flow/test/test_framestate.py +++ /dev/null @@ -1,97 +0,0 @@ - -import autopath - -from pypy.objspace.flow.flowcontext import * -from pypy.objspace.flow.model import * -from pypy.interpreter.pycode import PyCode - -objspacename = 'flow' - -class TestFrameState: - def getframe(self, func): - space = self.space - try: - func = func.im_func - except AttributeError: - pass - code = func.func_code - code = PyCode()._from_code(code) - w_globals = Constant({}) # space.newdict([]) - frame = code.create_frame(space, w_globals) - - formalargcount = code.getformalargcount() - dummy = Constant(None) - #dummy.dummy = True - arg_list = ([Variable() for i in range(formalargcount)] + - [dummy] * (len(frame.fastlocals_w) - formalargcount)) - frame.setfastscope(arg_list) - return frame - - def func_simple(x): - spam = 
5 - return spam - - def test_eq_framestate(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - fs2 = FrameState(frame) - assert fs1 == fs2 - - def test_neq_hacked_framestate(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - frame.fastlocals_w[-1] = Variable() - fs2 = FrameState(frame) - assert fs1 != fs2 - - def test_union_on_equal_framestates(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - fs2 = FrameState(frame) - assert fs1.union(fs2) == fs1 - - def test_union_on_hacked_framestates(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - frame.fastlocals_w[-1] = Variable() - fs2 = FrameState(frame) - assert fs1.union(fs2) == fs2 # fs2 is more general - assert fs2.union(fs1) == fs2 # fs2 is more general - - def test_restore_frame(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - frame.fastlocals_w[-1] = Variable() - fs1.restoreframe(frame) - assert fs1 == FrameState(frame) - - def test_copy(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - fs2 = fs1.copy() - assert fs1 == fs2 - - def test_getvariables(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - vars = fs1.getvariables() - assert len(vars) == 1 - - def test_getoutputargs(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - frame.fastlocals_w[-1] = Variable() - fs2 = FrameState(frame) - outputargs = fs1.getoutputargs(fs2) - # 'x' -> 'x' is a Variable - # fastlocals_w[-1] -> fastlocals_w[-1] is Constant(None) - assert outputargs == [frame.fastlocals_w[0], Constant(None)] - - def test_union_different_constants(self): - frame = self.getframe(self.func_simple) - fs1 = FrameState(frame) - frame.fastlocals_w[-1] = Constant(42) - fs2 = FrameState(frame) - fs3 = fs1.union(fs2) - fs3.restoreframe(frame) - assert isinstance(frame.fastlocals_w[-1], Variable) # generalized diff --git 
a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py deleted file mode 100644 index a7b84cc2fe..0000000000 --- a/pypy/objspace/flow/test/test_model.py +++ /dev/null @@ -1,79 +0,0 @@ -import autopath - -from pypy.objspace.flow.model import * - -objspacename = 'flow' - -class TestModel: - def getflow(self, func): - import inspect - try: - func = func.im_func - except AttributeError: - pass - #name = func.func_name - return self.space.build_flow(func) - - #_____________________________________________ - def simplefunc(x): - return x+1 - - def test_simplefunc(self): - graph = self.getflow(self.simplefunc) - l = flatten(graph) - #print l - assert len(l) == 4 - - def test_class(self): - graph = self.getflow(self.simplefunc) - - class MyVisitor: - def __init__(self): - self.blocks = [] - self.links = [] - - def visit_FunctionGraph(self, graph): - self.graph = graph - def visit_Block(self, block): - self.blocks.append(block) - def visit_Link(self, link): - self.links.append(link) - - v = MyVisitor() - traverse(v, graph) - assert len(v.blocks) == 2 - assert len(v.links) == 1 - assert v.graph == graph - assert v.links[0] == graph.startblock.exits[0] - - def test_partial_class(self): - graph = self.getflow(self.simplefunc) - - class MyVisitor: - def __init__(self): - self.blocks = [] - self.links = [] - - def visit_FunctionGraph(self, graph): - self.graph = graph - def visit_Block(self, block): - self.blocks.append(block) - def visit(self, link): - self.links.append(link) - - v = MyVisitor() - traverse(v, graph) - assert len(v.blocks) == 2 - assert len(v.links) == 1 - assert v.graph == graph - assert v.links[0] == graph.startblock.exits[0] - - def loop(x): - x = abs(x) - while x: - x = x - 1 - - def test_loop(self): - graph = self.getflow(self.simplefunc) - l = flatten(graph) - assert len(l) == 4 diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py deleted file mode 100644 index f2a68bed10..0000000000 --- 
a/pypy/objspace/flow/test/test_objspace.py +++ /dev/null @@ -1,353 +0,0 @@ -import autopath -from pypy.objspace.flow.model import Constant -from pypy.interpreter.argument import Arguments - - -objspacename = 'flow' - -class TestFlowObjSpace: - def codetest(self, func): - import inspect - try: - func = func.im_func - except AttributeError: - pass - #name = func.func_name - graph = self.space.build_flow(func) - graph.source = inspect.getsource(func) - return graph - - def reallyshow(self, x): - x.show() - #import os - #from pypy.translator.tool.make_dot import make_dot - #dest = make_dot(x.name, x) - #os.system('gv %s' % str(dest)) - - def show(self, x): - pass # or self.reallyshow(x) - - #__________________________________________________________ - def nothing(): - pass - - def test_nothing(self): - x = self.codetest(self.nothing) - assert len(x.startblock.exits) == 1 - link, = x.startblock.exits - assert link.target == x.returnblock - self.show(x) - - #__________________________________________________________ - def simplebranch(i, j): - if i < 0: - return i - return j - - def test_simplebranch(self): - x = self.codetest(self.simplebranch) - self.show(x) - - #__________________________________________________________ - def ifthenelse(i, j): - if i < 0: - i = j - return g(i) + 1 - - def test_ifthenelse(self): - x = self.codetest(self.simplebranch) - self.show(x) - - #__________________________________________________________ - def print_(i): - print i - - def test_print(self): - x = self.codetest(self.print_) - self.show(x) - - #__________________________________________________________ - def while_(i): - while i > 0: - i = i - 1 - - def test_while(self): - x = self.codetest(self.while_) - self.show(x) - - #__________________________________________________________ - def union_easy(i): - if i: - pass - else: - i = 5 - return i - - def test_union_easy(self): - x = self.codetest(self.union_easy) - self.show(x) - - 
#__________________________________________________________ - def union_hard(i): - if i: - i = 5 - return i - - def test_union_hard(self): - x = self.codetest(self.union_hard) - self.show(x) - - #__________________________________________________________ - def while_union(i): - total = 0 - while i > 0: - total += i - i = i - 1 - return total - - def test_while_union(self): - x = self.codetest(self.while_union) - self.show(x) - - #__________________________________________________________ - def simple_for(lst): - total = 0 - for i in lst: - total += i - return total - - def test_simple_for(self): - x = self.codetest(self.simple_for) - self.show(x) - - #__________________________________________________________ - def nested_whiles(i, j): - s = '' - z = 5 - while z > 0: - z = z - 1 - u = i - while u < j: - u = u + 1 - s = s + '.' - s = s + '!' - return s - - def test_nested_whiles(self): - x = self.codetest(self.nested_whiles) - self.show(x) - - #__________________________________________________________ - def break_continue(x): - result = [] - i = 0 - while 1: - i = i + 1 - try: - if i&1: - continue - if i >= x: - break - finally: - result.append(i) - i = i + 1 - return result - - def test_break_continue(self): - x = self.codetest(self.break_continue) - self.show(x) - - #__________________________________________________________ - def unpack_tuple(lst): - a, b, c = lst - - def test_unpack_tuple(self): - x = self.codetest(self.unpack_tuple) - self.show(x) - - #__________________________________________________________ - def reverse_3(lst): - try: - a, b, c = lst - except: - return 0, 0, 0 - else: - return c, b, a - - def test_reverse_3(self): - x = self.codetest(self.reverse_3) - self.show(x) - - #__________________________________________________________ - def finallys(lst): - x = 1 - try: - x = 2 - try: - x = 3 - a, = lst - x = 4 - except KeyError: - return 5 - except ValueError: - return 6 - b, = lst - x = 7 - finally: - x = 8 - return x - - def 
test_finallys(self): - x = self.codetest(self.finallys) - self.show(x) - - #__________________________________________________________ - def implicitIndexError(lst): - try: - x = lst[5] - except IndexError: - return 'catch' - return lst[3] # not caught - - def test_implicitIndexError(self): - x = self.codetest(self.implicitIndexError) - self.show(x) - - #__________________________________________________________ - def freevar(self, x): - def adder(y): - return x+y - return adder - - def test_freevar(self): - x = self.codetest(self.freevar(3)) - self.show(x) - - #__________________________________________________________ - def raise1(msg): - raise IndexError - - def test_raise1(self): - x = self.codetest(self.raise1) - self.show(x) - assert len(x.startblock.operations) == 0 - assert x.startblock.exits[0].args == [ - Constant(IndexError), - Constant(None)] # no normalization - assert x.startblock.exits[0].target is x.exceptblock - - #__________________________________________________________ - def raise2(msg): - raise IndexError, msg - - def test_raise2(self): - x = self.codetest(self.raise2) - self.show(x) - assert len(x.startblock.operations) == 0 - assert x.startblock.exits[0].args == [ - Constant(IndexError), - x.startblock.inputargs[0]] - assert x.startblock.exits[0].target is x.exceptblock - - #__________________________________________________________ - def raise3(msg): - raise IndexError(msg) - - def test_raise3(self): - x = self.codetest(self.raise3) - self.show(x) - assert len(x.startblock.operations) == 1 - assert x.startblock.operations[0].opname == 'simple_call' - assert list(x.startblock.operations[0].args) == [ - Constant(IndexError), - x.startblock.inputargs[0]] - assert x.startblock.exits[0].args == [ - Constant(IndexError), - x.startblock.operations[0].result] - assert x.startblock.exits[0].target is x.exceptblock - - #__________________________________________________________ - def raise4(stuff): - raise stuff - - def test_raise4(self): - x = 
self.codetest(self.raise4) - self.show(x) - - #__________________________________________________________ - def raise_and_catch_1(exception_instance): - try: - raise exception_instance - except IndexError: - return -1 - return 0 - - def test_raise_and_catch_1(self): - x = self.codetest(self.raise_and_catch_1) - self.show(x) - - #__________________________________________________________ - def catch_simple_call(): - try: - user_defined_function() - except IndexError: - return -1 - return 0 - - def test_catch_simple_call(self): - x = self.codetest(self.catch_simple_call) - self.show(x) - - #__________________________________________________________ - def dellocal(): - x = 1 - del x - for i in range(10): - pass - - def test_dellocal(self): - x = self.codetest(self.dellocal) - self.show(x) - - #__________________________________________________________ - def globalconstdict(name): - x = DATA['x'] - z = DATA[name] - return x, z - - def test_globalconstdict(self): - x = self.codetest(self.globalconstdict) - self.show(x) - - #__________________________________________________________ - def specialcases(): - import operator - operator.lt(2,3) - operator.le(2,3) - operator.eq(2,3) - operator.ne(2,3) - operator.gt(2,3) - operator.ge(2,3) - operator.is_(2,3) - operator.__lt__(2,3) - operator.__le__(2,3) - operator.__eq__(2,3) - operator.__ne__(2,3) - operator.__gt__(2,3) - operator.__ge__(2,3) - - def test_specialcases(self): - x = self.codetest(self.specialcases) - assert len(x.startblock.operations) == 13 - for op in x.startblock.operations: - assert op.opname in ['lt', 'le', 'eq', 'ne', - 'gt', 'ge', 'is_'] - assert len(op.args) == 2 - assert op.args[0].value == 2 - assert op.args[1].value == 3 - -DATA = {'x': 5, - 'y': 6} - -def user_defined_function(): - pass diff --git a/pypy/objspace/std/__init__.py b/pypy/objspace/std/__init__.py deleted file mode 100644 index 1af2924823..0000000000 --- a/pypy/objspace/std/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from objspace 
import StdObjSpace -Space = StdObjSpace diff --git a/pypy/objspace/std/autopath.py b/pypy/objspace/std/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/objspace/std/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' 
in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/objspace/std/basestringtype.py b/pypy/objspace/std/basestringtype.py deleted file mode 100644 index 9583db26f8..0000000000 --- a/pypy/objspace/std/basestringtype.py +++ /dev/null @@ -1,7 +0,0 @@ -from pypy.objspace.std.stdtypedef import * - - -# ____________________________________________________________ - -basestring_typedef = StdTypeDef("basestring", - ) diff --git 
a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py deleted file mode 100644 index 271c4a86ec..0000000000 --- a/pypy/objspace/std/boolobject.py +++ /dev/null @@ -1,43 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.objspace.std import intobject - - -class W_BoolObject(W_Object): - from pypy.objspace.std.booltype import bool_typedef as typedef - - def __init__(w_self, space, boolval): - W_Object.__init__(w_self, space) - w_self.boolval = not not boolval - - def __nonzero__(w_self): - raise Exception, "you cannot do that, you must use space.is_true()" - - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.boolval) - -registerimplementation(W_BoolObject) - -# bool-to-int delegation requires translating the .boolvar attribute -# to an .intval one -def delegate__Bool(space, w_bool): - return intobject.W_IntObject(space, int(w_bool.boolval)) -delegate__Bool.result_class = intobject.W_IntObject -delegate__Bool.priority = PRIORITY_PARENT_TYPE - - -def nonzero__Bool(space, w_bool): - return w_bool - -def unwrap__Bool(space, w_bool): - return w_bool.boolval - -def repr__Bool(space, w_bool): - if w_bool.boolval: - return space.wrap('True') - else: - return space.wrap('False') - -str__Bool = repr__Bool - -register_all(vars()) diff --git a/pypy/objspace/std/booltype.py b/pypy/objspace/std/booltype.py deleted file mode 100644 index dbe88e5cb8..0000000000 --- a/pypy/objspace/std/booltype.py +++ /dev/null @@ -1,17 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.inttype import int_typedef - -# XXX should forbit subclassing of 'bool' - - -def descr__new__(space, w_booltype, w_obj=None): - if w_obj is not None and space.is_true(w_obj): - return space.w_True - else: - return space.w_False - -# ____________________________________________________________ - -bool_typedef = StdTypeDef("bool", int_typedef, - __new__ = newmethod(descr__new__), - ) diff --git 
a/pypy/objspace/std/default.py b/pypy/objspace/std/default.py deleted file mode 100644 index 10bb893756..0000000000 --- a/pypy/objspace/std/default.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Default implementation for some operation.""" - -from pypy.objspace.std.objspace import * - - -# The following default implementations are used before delegation is tried. -# 'id' is normally the address of the wrapper. - -def id__ANY(space, w_obj): - #print 'id:', w_obj - from pypy.objspace.std import intobject - return intobject.W_IntObject(space, id(w_obj)) - -# __init__ should succeed if called internally as a multimethod - -def init__ANY(space, w_obj, w_args, w_kwds): - pass - - -# __nonzero__ falls back to __len__ - -##def is_true__ANY(space, w_obj): -## w_descr = space.lookup(w_obj, '__len__') -## if w_descr is None: -## return True -## else: -## w_len = space.get_and_call_function(w_descr, w_obj) -## return space.is_true(w_len) - -### in-place operators fall back to their non-in-place counterpart - -##for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: -## if _name.startswith('inplace_'): -## def default_inplace(space, w_1, w_2, baseop=_name[8:]): -## op = getattr(space, baseop) -## return op(w_1, w_2) -## getattr(StdObjSpace.MM, _name).register(default_inplace, -## W_Object, W_ANY) - -# '__get__(descr, inst, cls)' returns 'descr' by default - -#def get__Object_ANY_ANY(space, w_descr, w_inst, w_cls): -# return w_descr - -#def is_data_descr__Object(space, w_descr): -# return 0 - -# give objects some default attributes and a default way to complain -# about missing attributes - -##def getattribute__Object_ANY(space, w_obj, w_attr): -## # XXX build a nicer error message along these lines: -## #w_type = space.type(w_obj) -## #w_typename = space.getattr(w_type, space.wrap('__name__')) -## #... 
- -## w_type = space.type(w_obj) -## if space.is_true(space.eq(w_attr, space.wrap('__class__'))): -## return w_type - -## # 1) look for descriptor -## # 2) if data descriptor, call it -## # 3) check __dict__ -## # 4) if present, return that -## # 5) if descriptor found in 2), call that -## # 6) raise AttrbuteError - -## w_descr = None - -## from typeobject import W_TypeObject -## if isinstance(w_type, W_TypeObject): # XXX must always be true at some point -## try: -## w_descr = w_type.lookup(w_attr) -## except KeyError: -## pass -## else: -## if space.is_data_descr(w_descr): -## return space.get(w_descr, w_obj, w_type) # XXX 3rd arg is wrong - -## try: -## w_dict = space.getdict(w_obj) -## except OperationError, e: -## if not e.match(space, space.w_TypeError): # 'unsupported type for getdict' -## raise -## else: -## if space.is_true(space.eq(w_attr, space.wrap('__dict__'))): -## return w_dict -## try: -## w_value = space.getitem(w_dict, w_attr) -## except OperationError, e: -## if not e.match(space, space.w_KeyError): -## raise -## else: -## return w_value # got a value from 'obj.__dict__[attr]' - -## if w_descr is not None: -## return space.get(w_descr, w_obj, w_type) - -## raise OperationError(space.w_AttributeError, w_attr) - - -# set attributes, complaining about read-only ones -- -# a more declarative way to define attributes would be welcome - -##def setattr__Object_ANY_ANY(space, w_obj, w_attr, w_value): - -## # 1) look for descriptor -## # 2) if data descriptor, call it -## # 3) try to set item in __dict__ - -## w_type = space.type(w_obj) -## if space.is_true(space.eq(w_attr, space.wrap('__class__'))): -## raise OperationError(space.w_AttributeError, -## space.wrap("read-only attribute")) -## if space.is_true(space.eq(w_attr, space.wrap('__dict__'))): -## raise OperationError(space.w_AttributeError, -## space.wrap("read-only attribute")) - -## from typeobject import W_TypeObject -## if isinstance(w_type, W_TypeObject): -## try: -## w_descr = 
w_type.lookup(w_attr) -## except KeyError: -## pass -## else: -## if space.is_data_descr(w_descr): -## return space.set(w_descr, w_obj, w_value) - -## try: -## w_dict = space.getdict(w_obj) -## except OperationError, e: -## if not e.match(space, space.w_TypeError): # "unsupported type for getdict" -## raise -## raise OperationError(space.w_AttributeError, w_attr) -## else: -## space.setitem(w_dict, w_attr, w_value) - - -##def delattr__Object_ANY(space, w_obj, w_attr): -## w_type = space.type(w_obj) -## if space.is_true(space.eq(w_attr, space.wrap('__class__'))): -## raise OperationError(space.w_AttributeError, -## space.wrap("read-only attribute")) -## if space.is_true(space.eq(w_attr, space.wrap('__dict__'))): -## raise OperationError(space.w_AttributeError, -## space.wrap("read-only attribute")) - -## from typeobject import W_TypeObject -## if isinstance(w_type, W_TypeObject): -## try: -## w_descr = w_type.lookup(w_attr) -## except KeyError: -## pass -## else: -## #space.type(w_descr).lookup(space.wrap('__delete__')) -## if space.is_data_descr(w_descr): -## return space.delete(w_descr, w_obj) - -## try: -## w_dict = space.getdict(w_obj) -## except OperationError, e: -## if not e.match(space, space.w_TypeError): # "unsupported type for getdict" -## raise -## raise OperationError(space.w_AttributeError, w_attr) -## else: -## try: -## space.delitem(w_dict, w_attr) -## except OperationError, e: -## if not e.match(space, space.w_KeyError): -## raise -## raise OperationError(space.w_AttributeError, w_attr) - -# static types - -##def type__Object(space, w_obj): -## if w_obj.statictype is None: -## # XXX remove me, temporary -## return space.wrap(space.unwrap(w_obj).__class__) -## else: -## w_type = space.get_typeinstance(w_obj.statictype) -## return w_type - -# repr(), str(), hash() - -##def repr__Object(space, w_obj): -## return space.wrap('<%s object at %s>'%( -## space.type(w_obj).typename, space.unwrap(space.id(w_obj)))) - -##def str__Object(space, w_obj): -## 
return space.repr(w_obj) - -##def hash__ANY(space, w_obj): -## return space.id(w_obj) - - -# The following operations are fall-backs if we really cannot find -# anything else even with delegation. -# 'eq' falls back to 'is' - -##def eq__ANY_ANY(space, w_a, w_b): -## return space.is_(w_a, w_b) - -# 'contains' falls back to iteration. - -##def contains__ANY_ANY(space, w_iterable, w_lookfor): -## w_iter = space.iter(w_iterable) -## while 1: -## try: -## w_next = space.next(w_iter) -## except OperationError, e: -## if not e.match(space, space.w_StopIteration): -## raise -## return space.w_False -## if space.is_true(space.eq(w_next, w_lookfor)): -## return space.w_True - -# ugh - -class UnwrapError(Exception): - pass - -def unwrap__ANY(space, w_obj): - if isinstance(w_obj, BaseWrappable): - return w_obj - else: - raise UnwrapError, 'cannot unwrap %r' % (w_obj,) - - -register_all(vars()) diff --git a/pypy/objspace/std/dictobject.py b/pypy/objspace/std/dictobject.py deleted file mode 100644 index bb38fdd3da..0000000000 --- a/pypy/objspace/std/dictobject.py +++ /dev/null @@ -1,264 +0,0 @@ -""" -Reviewed 03-06-22 -All common dictionary methods are correctly implemented, -tested, and complete. The only missing feature is support -for order comparisons. 
-""" - -from pypy.objspace.std.objspace import * -from pypy.interpreter import gateway - -from pypy.objspace.std.restricted_int import r_uint - -dummy = object() - -class Entry: - def __init__(self): - self.hash = r_uint(0) - self.w_key = None - self.w_value = None - def __repr__(self): - return '<Entry %r,%r,%r>'%(self.hash, self.w_key, self.w_value) - -class W_DictObject(W_Object): - from pypy.objspace.std.dicttype import dict_typedef as typedef - - def __init__(w_self, space, list_pairs_w): - W_Object.__init__(w_self, space) - - w_self.used = 0 - w_self.data = [] - w_self.resize(len(list_pairs_w)*2) - for w_k, w_v in list_pairs_w: - w_self.insert(w_self.hash(w_k), w_k, w_v) - - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.data) - - def hash(w_self, w_obj): - space = w_self.space - return r_uint(space.unwrap(space.hash(w_obj))) - - def insert(self, h, w_key, w_value): - entry = self.lookdict(h, w_key) - if entry.w_value is None: - self.used += 1 - entry.hash = h - entry.w_key = w_key - entry.w_value = w_value - else: - entry.w_value = w_value - - def resize(self, minused): - newsize = 4 - while newsize < minused: - newsize *= 2 - od = self.data - - self.used = 0 - self.data = [Entry() for i in range(newsize)] - for entry in od: - if entry.w_value is not None: - self.insert(entry.hash, entry.w_key, entry.w_value) - - def non_empties(self): - return [entry for entry in self.data if entry.w_value is not None] - - def lookdict(self, lookup_hash, w_lookup): - assert isinstance(lookup_hash, r_uint) - space = self.space - i = lookup_hash % len(self.data) - - entry = self.data[i] - if entry.w_key is None or \ - space.is_true(space.is_(w_lookup, entry.w_key)): - return entry - if entry.w_key is dummy: - freeslot = entry - else: - if entry.hash == lookup_hash and space.is_true( - space.eq(entry.w_key, w_lookup)): - return entry - freeslot = None - - perturb = lookup_hash - ##c = len(self.data) 
+ 99 - while 1: - ##c -= 1 - ##if not c: - ## import sys, pdb - ## print >> sys.stderr, 'dict lookup lost in infinite loop' - ## pdb.set_trace() - i = (i << 2) + i + perturb + 1 - entry = self.data[i%len(self.data)] - if entry.w_value is None: - if freeslot: - return freeslot - else: - return entry - if entry.hash == lookup_hash and entry.w_key is not dummy \ - and space.is_true( - space.eq(entry.w_key, w_lookup)): - return entry - if entry.w_key is dummy and freeslot is None: - freeslot = entry - perturb >>= 5 - -registerimplementation(W_DictObject) - - -def unwrap__Dict(space, w_dict): - result = {} - for entry in w_dict.non_empties(): - result[space.unwrap(entry.w_key)] = space.unwrap(entry.w_value) - return result - -def init__Dict(space, w_dict, w_args, w_kwds): - dict_clear__Dict(space, w_dict) - args = space.unpackiterable(w_args) - if len(args) == 0: - pass - elif len(args) == 1: - # XXX do dict({...}) with dict_update__Dict_Dict() - try: - space.getattr(args[0], space.wrap("keys")) - except OperationError: - list_of_w_pairs = space.unpackiterable(args[0]) - for w_pair in list_of_w_pairs: - pair = space.unpackiterable(w_pair) - if len(pair)!=2: - raise OperationError(space.w_ValueError, - space.wrap("dict() takes a sequence of pairs")) - w_k, w_v = pair - setitem__Dict_ANY_ANY(space, w_dict, w_k, w_v) - else: - from pypy.objspace.std.dicttype import dict_update__ANY_ANY - dict_update__ANY_ANY(space, w_dict, args[0]) - else: - raise OperationError(space.w_TypeError, - space.wrap("dict() takes at most 1 argument")) - space.call_method(w_dict, 'update', w_kwds) - -def getitem__Dict_ANY(space, w_dict, w_lookup): - entry = w_dict.lookdict(w_dict.hash(w_lookup), w_lookup) - if entry.w_value is not None: - return entry.w_value - else: - raise OperationError(space.w_KeyError, w_lookup) - -def setitem__Dict_ANY_ANY(space, w_dict, w_newkey, w_newvalue): - w_dict.insert(w_dict.hash(w_newkey), w_newkey, w_newvalue) - if 2*w_dict.used > len(w_dict.data): - 
w_dict.resize(2*w_dict.used) - -def delitem__Dict_ANY(space, w_dict, w_lookup): - entry = w_dict.lookdict(w_dict.hash(w_lookup), w_lookup) - if entry.w_value is not None: - w_dict.used -= 1 - entry.w_key = dummy - entry.w_value = None - else: - raise OperationError(space.w_KeyError, w_lookup) - -def len__Dict(space, w_dict): - return space.wrap(w_dict.used) - -def contains__Dict_ANY(space, w_dict, w_lookup): - entry = w_dict.lookdict(w_dict.hash(w_lookup), w_lookup) - return space.newbool(entry.w_value is not None) - -dict_has_key__Dict_ANY = contains__Dict_ANY - -def iter__Dict(space, w_dict): - from pypy.objspace.std import iterobject - w_keys = dict_keys__Dict(space, w_dict) - return iterobject.W_SeqIterObject(space, w_keys) - -def eq__Dict_Dict(space, w_left, w_right): - if space.is_true(space.is_(w_left, w_right)): - return space.w_True - - dataleft = w_left.non_empties() - dataright = w_right.non_empties() - if len(dataleft) != len(dataright): - return space.w_False - for entry in dataleft: - try: - w_rightval = space.getitem(w_right, entry.w_key) - except OperationError: - return space.w_False - if not space.is_true(space.eq(entry.w_value, w_rightval)): - return space.w_False - return space.w_True - -def lt__Dict_Dict(space, w_left, w_right): - # Different sizes, no problem - dataleft = w_left.non_empties() - dataright = w_right.non_empties() - if len(dataleft) < len(dataright): - return space.w_True - if len(dataleft) > len(dataright): - return space.w_False - - # Same size - for entry in dataleft: - # This is incorrect, but we need to decide what comparisons on - # dictionaries of equal size actually means - # The Python language specification is silent on the subject - try: - w_rightval = space.getitem(w_right, entry.w_key) - except OperationError: - return space.w_True - if space.is_true(space.lt(entry.w_value, w_rightval)): - return space.w_True - # The dictionaries are equal. This is correct. 
- return space.w_False - -def hash__Dict(space,w_dict): - raise OperationError(space.w_TypeError,space.wrap("dict objects are unhashable")) - -def dict_copy__Dict(space, w_self): - return W_DictObject(space, [(entry.w_key,entry.w_value) - for entry in w_self.data - if entry.w_value is not None]) - -def dict_items__Dict(space, w_self): - return space.newlist([ space.newtuple([entry.w_key,entry.w_value]) - for entry in w_self.data - if entry.w_value is not None]) - -def dict_keys__Dict(space, w_self): - return space.newlist([ entry.w_key - for entry in w_self.data - if entry.w_value is not None]) - -def dict_values__Dict(space, w_self): - return space.newlist([ entry.w_value - for entry in w_self.data - if entry.w_value is not None]) - -def dict_clear__Dict(space, w_self): - w_self.data = [Entry()] - w_self.used = 0 - -def dict_get__Dict_ANY_ANY(space, w_dict, w_lookup, w_default): - entry = w_dict.lookdict(w_dict.hash(w_lookup), w_lookup) - if entry.w_value is not None: - return entry.w_value - else: - return w_default - -# Now we only handle one implementation of dicts, this one. -# The fix is to move this to dicttype.py, and do a -# multimethod lookup mapping str to StdObjSpace.str -# This cannot happen until multimethods are fixed. 
See dicttype.py -def app_str__Dict(d): - items = [] - for k, v in d.iteritems(): - items.append(repr(k) + ": " + repr(v)) - return "{" + ', '.join(items) + "}" - -repr__Dict = str__Dict = gateway.app2interp(app_str__Dict) -from pypy.objspace.std import dicttype -register_all(vars(), dicttype) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py deleted file mode 100644 index 7b7bfde18a..0000000000 --- a/pypy/objspace/std/dictproxyobject.py +++ /dev/null @@ -1,19 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.interpreter.typedef import GetSetProperty - -def descr_get_dictproxy(space, w_obj): - obj = space.unwrap_builtin(w_obj) - return W_DictProxyObject(space, obj.getdict()) - -dictproxy_descr = GetSetProperty(descr_get_dictproxy) - -class W_DictProxyObject(W_Object): - from pypy.objspace.std.dictproxytype import dictproxy_typedef as typedef - - def __init__(w_self, space, w_dict): - W_Object.__init__(w_self, space) - w_self.w_dict = w_dict - -registerimplementation(W_DictProxyObject) - -register_all(vars()) diff --git a/pypy/objspace/std/dictproxytype.py b/pypy/objspace/std/dictproxytype.py deleted file mode 100644 index d88cd16696..0000000000 --- a/pypy/objspace/std/dictproxytype.py +++ /dev/null @@ -1,38 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.register_all import register_all -from pypy.interpreter.error import OperationError - -# ____________________________________________________________ - -def proxymethod(name): - def fget(space, w_obj): - obj = space.unwrap_builtin(w_obj) - return space.getattr(obj.w_dict, space.wrap(name)) - return GetSetProperty(fget) - -# ____________________________________________________________ - -dictproxy_typedef = StdTypeDef("dictproxy", - has_key = proxymethod('has_key'), - get = proxymethod('get'), - keys = proxymethod('keys'), - values = proxymethod('values'), - items = proxymethod('items'), - iterkeys = proxymethod('iterkeys'), - itervalues = 
proxymethod('itervalues'), - iteritems = proxymethod('iteritems'), - copy = proxymethod('copy'), - __len__ = proxymethod('__len__'), - __getitem__ = proxymethod('__getitem__'), - __contains__ = proxymethod('__contains__'), - __str__ = proxymethod('__str__'), - __iter__ = proxymethod('__iter__'), - __cmp__ = proxymethod('__cmp__'), - __lt__ = proxymethod('__lt__'), - __le__ = proxymethod('__le__'), - __eq__ = proxymethod('__eq__'), - __ne__ = proxymethod('__ne__'), - __gt__ = proxymethod('__gt__'), - __ge__ = proxymethod('__ge__'), -) -dictproxy_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py deleted file mode 100644 index bc321104d5..0000000000 --- a/pypy/objspace/std/dicttype.py +++ /dev/null @@ -1,107 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.register_all import register_all - -dict_copy = MultiMethod('copy', 1) -dict_items = MultiMethod('items', 1) -dict_keys = MultiMethod('keys', 1) -dict_values = MultiMethod('values', 1) -dict_has_key = MultiMethod('has_key', 2) -dict_clear = MultiMethod('clear', 1) -dict_get = MultiMethod('get', 3, defaults=(None,)) -dict_pop = MultiMethod('pop', 2, varargs=True) -dict_popitem = MultiMethod('popitem', 1) -dict_setdefault = MultiMethod('setdefault', 3, defaults=(None,)) -dict_update = MultiMethod('update', 2) -dict_iteritems = MultiMethod('iteritems', 1) -dict_iterkeys = MultiMethod('iterkeys', 1) -dict_itervalues = MultiMethod('itervalues', 1) -#dict_fromkeys = MultiMethod('fromkeys', 2, varargs=True) -# This can return when multimethods have been fixed -#dict_str = StdObjSpace.str - -# default application-level implementations for some operations - -def app_dict_update__ANY_ANY(d, o): - for k in o.keys(): - d[k] = o[k] - -def app_dict_popitem__ANY(d): - k = d.keys() - if not k: - raise KeyError("popitem(): dictionary is empty") - k = k[0] - v = d[k] - del d[k] - return k, v - -def app_dict_get__ANY_ANY_ANY(d, k, v=None): - if 
d.has_key(k): - return d[k] - return v - -def app_dict_setdefault__ANY_ANY_ANY(d, k, v): - if d.has_key(k): - return d[k] - d[k] = v - return v - -def app_dict_pop__ANY_ANY(d, k, default): - if len(default) > 1: - raise TypeError, "pop expected at most 2 arguments, got %d" % ( - 1 + len(default)) - try: - v = d[k] - del d[k] - except KeyError, e: - if default: - return default[0] - else: - raise e - return v - -def app_dict_iteritems__ANY(d): - return iter(d.items()) - -def app_dict_iterkeys__ANY(d): - return iter(d.keys()) - -def app_dict_itervalues__ANY(d): - return iter(d.values()) - -#def app_dict_fromkeys__ANY_List(d, seq, value): -# d = {} -# if value: -# value = value[0] -# else: -# value = None -# for item in seq: -# d[item] = value -# return d -#XXX implement dict.fromkeys() which must be a static method -#XXX accepting any iterable - -# This can return when multimethods have been fixed -""" -def app_dict_str__ANY(d): - items = [] - for k, v in d.iteritems(): - items.append("%r: %r" % (k, v)) - return "{%s}" % ', '.join(items) -""" -gateway.importall(globals()) -register_all(vars(), globals()) - -# ____________________________________________________________ - -def descr__new__(space, w_dicttype, __args__): - from pypy.objspace.std.dictobject import W_DictObject - w_obj = space.allocate_instance(W_DictObject, w_dicttype) - w_obj.__init__(space, []) - return w_obj - -# ____________________________________________________________ - -dict_typedef = StdTypeDef("dict", - __new__ = newmethod(descr__new__), - ) -dict_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/dump_multimethod.py b/pypy/objspace/std/dump_multimethod.py deleted file mode 100644 index 6b62d7a231..0000000000 --- a/pypy/objspace/std/dump_multimethod.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import generators -import autopath -import sys -from pypy.objspace.std import StdObjSpace -from pypy.objspace.std.multimethod import * - -# TODO: local multimethods - 
-IMPLEMENTATIONS = [ - "pypy.objspace.std.objectobject.W_ObjectObject", - "pypy.objspace.std.boolobject.W_BoolObject", - "pypy.objspace.std.intobject.W_IntObject", - "pypy.objspace.std.floatobject.W_FloatObject", - "pypy.objspace.std.tupleobject.W_TupleObject", - "pypy.objspace.std.listobject.W_ListObject", - "pypy.objspace.std.dictobject.W_DictObject", - "pypy.objspace.std.stringobject.W_StringObject", - "pypy.objspace.std.typeobject.W_TypeObject", - "pypy.objspace.std.sliceobject.W_SliceObject", - "pypy.objspace.std.longobject.W_LongObject", - "pypy.objspace.std.noneobject.W_NoneObject", - "pypy.objspace.std.iterobject.W_SeqIterObject", - "pypy.objspace.std.unicodeobject.W_UnicodeObject", - ] - -def import_implementations(): - # populate the multimethod tables by importing all object implementations - from pypy.objspace.std import default - result = [] - for fullpath in IMPLEMENTATIONS: - i = fullpath.rfind('.') - assert i>=0 - modname, clsname = fullpath[:i], fullpath[i+1:] - module = __import__(modname, globals(), {}, [clsname]) - result.append(getattr(module, clsname)) - return result - -def list_multimethods(impls=[]): - collected = {} - for name, value in (StdObjSpace.MM.__dict__.items() + - StdObjSpace.__dict__.items()): - if isinstance(value, MultiMethod): - collected[value] = name - for impl in impls: - for mm in impl.typedef.local_multimethods: - collected[mm] = '%s.%s' % (impl.typedef.name, mm.operatorsymbol) - result = [(name, value) for value, name in collected.iteritems()] - result.sort() - return result - -def cartesian_prod(lstlst): - if len(lstlst) == 0: - yield () - else: - for first in lstlst[0]: - for rest in cartesian_prod(lstlst[1:]): - yield (first,) + rest - -def dump_table(mm, impls): - print 'multimethod %r of arity %d.' % (mm.operatorsymbol, mm.arity) - if mm.dispatch_arity < mm.arity: - print 'dispatch arity is actually only %d.' 
% mm.dispatch_arity - delegate = StdObjSpace.delegate - versions = {} - sourcecache = {} - for argclasses in cartesian_prod([impls] * mm.dispatch_arity): - calllist = [] - mm.internal_buildcalllist(argclasses, delegate, calllist) - calllist = tuple(calllist) - try: - key = sourcecache[calllist] - except KeyError: - src, glob = mm.internal_sourcecalllist(calllist) - if 'FailedToImplement' in glob: - del glob['FailedToImplement'] - order = len(glob) - else: - order = 0.5 - glob = glob.items() - glob.sort() - glob = tuple(glob) - key = sourcecache[calllist] = order, src, glob - versions.setdefault(key, []).append(argclasses) - versions = versions.items() - versions.sort() - versions.reverse() - for (order, src, glob), lstargclasses in versions: - print - # collapse ranges within argclasses where the last arg is not - # relevant - i = len(lstargclasses)-1 - m = mm.dispatch_arity-1 - while i >= 0: - if i+len(impls) <= len(lstargclasses): - model = lstargclasses[i][:m] - for next in lstargclasses[i+1:i+len(impls)]: - if next[:m] == model and next[m+1:] == (W_ANY,)*(mm.dispatch_arity-1-m): - pass - else: - break - else: - lstargclasses[i:i+len(impls)] = [model + (W_ANY,)*(mm.dispatch_arity-m)] - if m > 0: - m -= 1 - continue - i -= 1 - m = mm.dispatch_arity-1 - - for argclasses in lstargclasses: - print '#', - for cls in argclasses: - if cls is W_ANY: - print '*', - else: - print cls.__name__, - print - print - - # prettify src - if not src: - src = 'directly using do' - lines = src.split('\n') - for j in range(len(lines)): - line = lines[j] - i = 1 - while line[:i] == ' '*i: - i += 1 - i -= 1 - lines[j] = ' '*i + line[i:] - src = '\n'.join(lines) - src = src.replace(',', ', ') - for key, value in glob: - try: - s = value.__name__ - except AttributeError: - s = repr(value) - src = src.replace(key, s) - print src - -if __name__ == '__main__': - impls = import_implementations() - total = 0 - restrict = sys.argv[1:] - for name, mm in list_multimethods(impls): - if (not 
restrict and name != 'delegate') or name in restrict: - print - print '==========', name, '==========' - print >> sys.stderr, name # progress bar - print - dump_table(mm, impls) - total += 1 - if not total: - print >> sys.stderr, "no matching multimethod name" diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py deleted file mode 100644 index 6ff6ec5297..0000000000 --- a/pypy/objspace/std/fake.py +++ /dev/null @@ -1,167 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.interpreter import eval -from pypy.interpreter.function import Function -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.objspace import W_Object, StdObjSpace -from pypy.objspace.std.default import UnwrapError -from pypy.tool.cache import Cache - -# this file automatically generates non-reimplementations of CPython -# types that we do not yet implement in the standard object space -# (files being the biggy) - -import sys - -_fake_type_cache = Cache() - -# real-to-wrapped exceptions -def wrap_exception(space): - exc, value, tb = sys.exc_info() - if exc is OperationError: - raise exc, value, tb # just re-raise it - name = exc.__name__ - if hasattr(space, 'w_' + name): - w_exc = getattr(space, 'w_' + name) - w_value = space.call_function(w_exc, - *[space.wrap(a) for a in value.args]) - for key, value in value.__dict__.items(): - if not key.startswith('_'): - space.setattr(w_value, space.wrap(key), space.wrap(value)) - else: - w_exc = space.wrap(exc) - w_value = space.wrap(value) - raise OperationError, OperationError(w_exc, w_value), tb - -def fake_type(cpy_type): - assert type(cpy_type) is type - return _fake_type_cache.getorbuild(cpy_type, really_build_fake_type, None) - -def really_build_fake_type(cpy_type, ignored): - "NOT_RPYTHON (not remotely so!)." 
- print 'faking %r'%(cpy_type,) - kw = {} - for s, v in cpy_type.__dict__.items(): - if cpy_type is not unicode or s not in ['__add__', '__contains__']: - kw[s] = v - def fake__new__(space, w_type, *args_w): - args = [space.unwrap(w_arg) for w_arg in args_w] - try: - r = cpy_type.__new__(cpy_type, *args) - except: - wrap_exception(space) - return W_Fake(space, r) - kw['__new__'] = gateway.interp2app(fake__new__) - if cpy_type.__base__ is not object: - assert cpy_type.__base__ is basestring - from pypy.objspace.std.basestringtype import basestring_typedef - base = basestring_typedef - else: - base = None - class W_Fake(W_Object): - typedef = StdTypeDef( - cpy_type.__name__, base, **kw) - def __init__(w_self, space, val): - W_Object.__init__(w_self, space) - w_self.val = val - # cannot write to W_Fake.__name__ in Python 2.2! - W_Fake = type(W_Object)('W_Fake%s'%(cpy_type.__name__.capitalize()), - (W_Object,), - dict(W_Fake.__dict__.items())) - def fake_unwrap(space, w_obj): - return w_obj.val - StdObjSpace.unwrap.register(fake_unwrap, W_Fake) - W_Fake.typedef.fakedcpytype = cpy_type - return W_Fake - -# ____________________________________________________________ -# -# Special case for built-in functions, methods, and slot wrappers. 
- -class CPythonFakeCode(eval.Code): - def __init__(self, cpy_callable): - eval.Code.__init__(self, getattr(cpy_callable, '__name__', '?')) - self.cpy_callable = cpy_callable - assert callable(cpy_callable), cpy_callable - def create_frame(self, space, w_globals, closure=None): - return CPythonFakeFrame(space, self, w_globals) - def signature(self): - return [], 'args', 'kwds' - -class CPythonFakeFrame(eval.Frame): - - def setfastscope(self, scope_w): - w_args, w_kwds = scope_w - try: - self.unwrappedargs = self.space.unwrap(w_args) - self.unwrappedkwds = self.space.unwrap(w_kwds) - except UnwrapError, e: - raise UnwrapError('calling %s: %s' % (self.code.cpy_callable, e)) - - def getfastscope(self): - raise OperationError(self.space.w_TypeError, - self.space.wrap("cannot get fastscope of a CPythonFakeFrame")) - def run(self): - fn = self.code.cpy_callable - try: - result = apply(fn, self.unwrappedargs, self.unwrappedkwds) - except: - wrap_exception(self.space) - return self.space.wrap(result) - -def fake_builtin_callable(space, val): - return Function(space, CPythonFakeCode(val)) - -_fake_type_cache.content[type(len)] = fake_builtin_callable -_fake_type_cache.content[type(list.append)] = fake_builtin_callable -_fake_type_cache.content[type(type(None).__repr__)] = fake_builtin_callable - - -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app - -class W_FakeDescriptor(Wrappable): - # Mimics pypy.interpreter.typedef.GetSetProperty. 
- - def __init__(self, space, d): - self.name = d.__name__ - - def descr_descriptor_get(space, w_descriptor, w_obj, w_cls=None): - # XXX HAAAAAAAAAAAACK (but possibly a good one) - if w_obj == space.w_None and not space.is_true(space.is_(w_cls, space.type(space.w_None))): - #print w_descriptor, w_obj, w_cls - return w_descriptor - else: - name = space.unwrap(w_descriptor).name - obj = space.unwrap(w_obj) - try: - val = getattr(obj, name) # this gives a "not RPython" warning - except: - wrap_exception(space) - return space.wrap(val) - - def descr_descriptor_set(space, w_descriptor, w_obj, w_value): - name = space.unwrap(w_descriptor).name - obj = space.unwrap(w_obj) - val = space.unwrap(w_value) - try: - setattr(obj, name, val) # this gives a "not RPython" warning - except: - wrap_exception(space) - - def descr_descriptor_del(space, w_descriptor, w_obj): - name = space.unwrap(w_descriptor).name - obj = space.unwrap(w_obj) - try: - delattr(obj, name) - except: - wrap_exception(space) - - typedef = TypeDef("FakeDescriptor", - __get__ = interp2app(descr_descriptor_get), - __set__ = interp2app(descr_descriptor_set), - __delete__ = interp2app(descr_descriptor_del), - ) - -_fake_type_cache.content[type(file.softspace)] = W_FakeDescriptor -_fake_type_cache.content[type(type.__dict__['__dict__'])] = W_FakeDescriptor diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py deleted file mode 100644 index b57c927771..0000000000 --- a/pypy/objspace/std/floatobject.py +++ /dev/null @@ -1,267 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.interpreter import gateway -from pypy.objspace.std.noneobject import W_NoneObject - -############################################################## -# for the time being, all calls that are made to some external -# libraries in the floatobject.c, calls are made into the -# python math library -############################################################## - -import math -from pypy.objspace.std.intobject 
import W_IntObject - -class W_FloatObject(W_Object): - """This is a reimplementation of the CPython "PyFloatObject" - it is assumed that the constructor takes a real Python float as - an argument""" - from pypy.objspace.std.floattype import float_typedef as typedef - - def __init__(w_self, space, floatval): - W_Object.__init__(w_self, space) - w_self.floatval = floatval - - -registerimplementation(W_FloatObject) - -# int-to-float delegation -def delegate__Int(space, w_intobj): - return W_FloatObject(space, float(w_intobj.intval)) -delegate__Int.result_class = W_FloatObject -delegate__Int.priority = PRIORITY_CHANGE_TYPE - - -# float__Float is supposed to do nothing, unless it has -# a derived float object, where it should return -# an exact one. -def float__Float(space, w_float1): - if space.is_true(space.is_(space.type(w_float1), space.w_float)): - return w_float1 - a = w_float1.floatval - return W_FloatObject(space, a) - -def int__Float(space, w_value): - return space.newint(int(w_value.floatval)) - -def unwrap__Float(space, w_float): - return w_float.floatval - -def app_repr__Float(f): - r = "%.17g"%f - for c in r: - if c not in '-0123456789': - return r - else: - return r + '.0' - -def app_str__Float(f): - r = "%.12g"%f - for c in r: - if c not in '-0123456789': - return r - else: - return r + '.0' - -def lt__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i < j ) - -def le__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i <= j ) - -def eq__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i == j ) - -def ne__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i != j ) - -def gt__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i > j ) - -def 
ge__Float_Float(space, w_float1, w_float2): - i = w_float1.floatval - j = w_float2.floatval - return space.newbool( i >= j ) - -def _floor(f): - return f - (f % 1.0) - -def _ceil(f): - if f - (f % 1.0) == f: - return f - return f + 1.0 - (f % 1.0) - -def round__Float_Int(space, w_float, w_int): - x = w_float.floatval - ndigits = w_int.intval - - # Algortithm copied directly from CPython - f = 1.0; - i = abs(ndigits); - - while i > 0: - f = f*10.0 - i -= 1 - if ndigits < 0: - x /= f - else: - x *= f - if x >= 0.0: - x = _floor(x + 0.5) - else: - x = _ceil(x - 0.5) - if ndigits < 0: - x *= f - else: - x /= f - return W_FloatObject(space, x) - -def hash__Float(space,w_value): - ## %reimplement% - # real Implementation should be taken from _Py_HashDouble in object.c - return space.wrap(hash(w_value.floatval)) - -def add__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - try: - z = x + y - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float addition")) - return W_FloatObject(space, z) - -def sub__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - try: - z = x - y - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float substraction")) - return W_FloatObject(space, z) - -def mul__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - try: - z = x * y - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float multiplication")) - return W_FloatObject(space, z) - -def div__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - try: - z = x / y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float division")) - # no overflow - return 
W_FloatObject(space, z) - -def floordiv__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - try: - z = x // y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float division")) - # no overflow - return W_FloatObject(space, z) - -def mod__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - if y == 0.0: - raise FailedToImplement(space.w_ZeroDivisionError, space.wrap("float modulo")) - try: - # this is a hack!!!! must be replaced by a real fmod function - mod = math.fmod(x,y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float division")) - - return W_FloatObject(space, mod) - -def divmod__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - if y == 0.0: - raise FailedToImplement(space.w_ZeroDivisionError, space.wrap("float modulo")) - try: - # XXX this is a hack!!!! 
must be replaced by a real fmod function - mod = math.fmod(x,y) - div = (x -mod) / y - if (mod): - if ((y < 0.0) != (mod < 0.0)): - mod += y - div -= -1.0 - else: - mod *= mod - if y < 0.0: - mod = -mod - if div: - floordiv = math.floor(div) - if (div - floordiv > 0.5): - floordiv += 1.0 - else: - div *= div; - floordiv = div * x / y - except FloatingPointError: - raise FailedToImplement(space.w_FloatingPointError, space.wrap("float division")) - - return space.newtuple([W_FloatObject(space, floordiv), - W_FloatObject(space, mod)]) - -def pow__Float_Float_ANY(space, w_float1, w_float2, thirdArg): - if thirdArg is not space.w_None: - raise FailedToImplement(space.w_TypeError, space.wrap("pow() 3rd argument not allowed unless all arguments are integers")) - x = w_float1.floatval - y = w_float2.floatval - try: - z = x ** y - except OverflowError: - raise FailedToImplement(space.w_OverflowError, space.wrap("float power")) - except ValueError, e: - raise FailedToImplement(space.w_ValueError, space.wrap(str(e))) - - return W_FloatObject(space, z) - -def neg__Float(space, w_float1): - return W_FloatObject(space, -w_float1.floatval) - -def pos__Float(space, w_float): - return float__Float(space, w_float) - -def abs__Float(space, w_float): - return W_FloatObject(space, abs(w_float.floatval)) - -def nonzero__Float(space, w_float): - return space.newbool(w_float.floatval != 0.0) - -######## coersion must be done later -later = """ -def float_coerce(space, w_float): - if w_float.__class__ == W_FloatObject: - return w_float - else: - return W_FloatObject(space, w_float.floatval) - -StdObjSpace.coerce.register(float_coerce, W_FloatObject) -""" - -gateway.importall(globals()) -register_all(vars()) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py deleted file mode 100644 index 579aec9ee3..0000000000 --- a/pypy/objspace/std/floattype.py +++ /dev/null @@ -1,33 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.interpreter.error import 
OperationError - -def descr__new__(space, w_floattype, w_value=None): - from pypy.objspace.std.floatobject import W_FloatObject - if w_value is None: - value = 0.0 - elif space.is_true(space.isinstance(w_value, space.w_str)): - try: - value = float(space.unwrap(w_value)) - except ValueError, e: - raise OperationError(space.w_ValueError, - space.wrap(str(e))) - else: - w_obj = space.float(w_value) - if space.is_true(space.is_(w_floattype, space.w_float)): - return w_obj # 'float(x)' should return - # whatever x.__float__() returned - value = space.unwrap(w_obj) - if isinstance(value, int): # XXX typechecking in unwrap! - value = float(value) - if not isinstance(value, float): # XXX typechecking in unwrap! - raise OperationError(space.w_ValueError, - space.wrap("value can't be converted to float")) - w_obj = space.allocate_instance(W_FloatObject, w_floattype) - w_obj.__init__(space, value) - return w_obj - -# ____________________________________________________________ - -float_typedef = StdTypeDef("float", - __new__ = newmethod(descr__new__), - ) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py deleted file mode 100644 index 73a5c6155e..0000000000 --- a/pypy/objspace/std/intobject.py +++ /dev/null @@ -1,439 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.restricted_int import r_int, LONG_BIT - -""" -The implementation of integers is a bit difficult, -since integers are currently undergoing the change to turn -themselves into longs under overflow circumstances. -The restricted Python does not overflow or throws -exceptions. -The definitions in this file are fine, given that -restricted Python integers behave that way. -But for testing, the resticted stuff must be run -by CPython which has different behavior. -For that reason, I defined an r_int extension class -for native integers, which tries to behave as in -RPython, just for test purposes. 
-""" - -class W_IntObject(W_Object): - from pypy.objspace.std.inttype import int_typedef as typedef - - def __init__(w_self, space, intval): - W_Object.__init__(w_self, space) - w_self.intval = r_int(intval) - - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%d)" % (w_self.__class__.__name__, w_self.intval) - - -registerimplementation(W_IntObject) - - -""" -XXX not implemented: -free list -FromString -FromUnicode -print -""" - -def unwrap__Int(space, w_int1): - return int(w_int1.intval) - -def repr__Int(space, w_int1): - a = w_int1.intval - res = str(a) - return space.wrap(res) - -str__Int = repr__Int - -## deprecated -## we are going to support rich compare, only - -##def int_int_cmp(space, w_int1, w_int2): -## i = w_int1.intval -## j = w_int2.intval -## if i < j: -## ret = -1 -## elif i > j: -## ret = 1 -## else: -## ret = 0 -## return W_IntObject(space, ret) -## -##StdObjSpace.cmp.register(int_int_cmp, W_IntObject, W_IntObject) - -def lt__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i < j ) - -def le__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i <= j ) - -def eq__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i == j ) - -def ne__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i != j ) - -def gt__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i > j ) - -def ge__Int_Int(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool( i >= j ) - -STRICT_HASH = True # temporary, to be put elsewhere or removed - -def _hash_strict(space, w_int1): - #/* XXX If this is changed, you also need to change the way - # Python's long, float and complex types are hashed. 
*/ - x = w_int1.intval - if x == -1: - x = -2 - return W_IntObject(space, x) - -def _hash_liberal(space, w_int1): - # Armin: unlike CPython we have no need to special-case the value -1 - return w_int1 - -# Chris: I'm not yet convinced that we want to make hash() -# return different values that CPython does. -# So for the moment, both versions are here, -# and we might think of some config options -# or decide to drop compatibility (using pypy-dev). - -def hash__Int(space, w_int1): - if STRICT_HASH: - return _hash_strict(space, w_int1) - else: - return _hash_liberal(space, w_int1) - -def add__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x + y - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer addition")) - return W_IntObject(space, z) - -def sub__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x - y - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer substraction")) - return W_IntObject(space, z) - -def mul__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x * y - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer multiplication")) - return W_IntObject(space, z) - -def _floordiv(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x // y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer division by zero")) - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer division")) - return W_IntObject(space, z) - -def _truediv(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x // y - t = x % y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer division by zero")) - except OverflowError: - return space.div(space.newfloat(float(x)), w_int2) - if t != 
0: # gives a float - return space.div(space.newfloat(float(x)), w_int2) - return W_IntObject(space, z) - -def mod__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x % y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer modulo by zero")) - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer modulo")) - return W_IntObject(space, z) - -def divmod__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = x // y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer divmod by zero")) - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer modulo")) - # no overflow possible - m = x % y - return space.wrap((z,m)) - -def div__Int_Int(space, w_int1, w_int2): - # Select the proper div - if 1 / 2 == 1 // 2: - return _floordiv(space, w_int1, w_int2) - else: - return _truediv(space, w_int1, w_int2) - -floordiv__Int_Int = _floordiv - -# helper for pow() -def _impl_int_int_pow(space, iv, iw, iz=None): - if iw < 0: - if iz is not None: - raise OperationError(space.w_TypeError, - space.wrap("pow() 2nd argument " - "cannot be negative when 3rd argument specified")) - return space.pow(space.float(space.wrap(iv)), - space.float(space.wrap(iw)), - space.w_None) - ## bounce it, since it always returns float - raise FailedToImplement(space.w_ValueError, - space.wrap("integer exponentiation")) - if iz is not None: - if iz == 0: - raise OperationError(space.w_ValueError, - space.wrap("pow() 3rd argument cannot be 0")) - temp = iv - ix = 1 - try: - while iw > 0: - if iw & 1: - ix = ix*temp - iw >>= 1 #/* Shift exponent down by 1 bit */ - if iw==0: - break - temp *= temp #/* Square the value of temp */ - if iz: - #/* If we did a multiplication, perform a modulo */ - ix = ix % iz; - temp = temp % iz; - if iz: - ix = ix % iz - except OverflowError: - raise 
FailedToImplement(space.w_OverflowError, - space.wrap("integer exponentiation")) - return W_IntObject(space, ix) - -""" -def pow__Int_Int_Int(space, w_int1, w_int2, w_int3): - x = w_int1.intval - y = w_int2.intval - z = w_int3.intval - ret = _impl_int_int_pow(space, x, y, z) - return W_IntObject(space, ret) -""" - -def pow__Int_Int_Int(space, w_int1, w_int2, w_int3): - x = w_int1.intval - y = w_int2.intval - z = w_int3.intval - return _impl_int_int_pow(space, x, y, z) - -def pow__Int_Int_None(space, w_int1, w_int2, w_int3): - x = w_int1.intval - y = w_int2.intval - return _impl_int_int_pow(space, x, y) - -def neg__Int(space, w_int1): - a = w_int1.intval - try: - x = -a - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer negation")) - return W_IntObject(space, x) - -# pos__Int is supposed to do nothing, unless it has -# a derived integer object, where it should return -# an exact one. -def pos__Int(space, w_int1): - return int__Int(space, w_int1) - -def abs__Int(space, w_int1): - if w_int1.intval >= 0: - return pos__Int(space, w_int1) - else: - return neg__Int(space, w_int1) - -def nonzero__Int(space, w_int1): - return space.newbool(w_int1.intval != 0) - -def invert__Int(space, w_int1): - x = w_int1.intval - a = ~x - return W_IntObject(space, a) - -def lshift__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - if a == 0 or b == 0: - return int__Int(space, w_int1) - if b >= LONG_BIT: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer left shift")) - ## - ## XXX please! have a look into pyport.h and see how to implement - ## the overflow checking, using macro Py_ARITHMETIC_RIGHT_SHIFT - ## we *assume* that the overflow checking is done correctly - ## in the code generator, which is not trivial! - - ## XXX also note that Python 2.3 returns a long and never raises - ## OverflowError. 
- try: - c = a << b - ## the test in C code is - ## if (a != Py_ARITHMETIC_RIGHT_SHIFT(long, c, b)) { - ## if (PyErr_Warn(PyExc_FutureWarning, - # and so on - except OverflowError: - raise FailedToImplement(space.w_OverflowError, - space.wrap("integer left shift")) - return W_IntObject(space, c) - -def rshift__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - if a == 0 or b == 0: - return int__Int(space, w_int1) - if b >= LONG_BIT: - if a < 0: - a = -1 - else: - a = 0 - else: - ## please look into pyport.h, how >> should be implemented! - ## a = Py_ARITHMETIC_RIGHT_SHIFT(long, a, b); - a = a >> b - return W_IntObject(space, a) - -def and__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - res = a & b - return W_IntObject(space, res) - -def xor__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - res = a ^ b - return W_IntObject(space, res) - -def or__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - res = a | b - return W_IntObject(space, res) - -# coerce is not wanted -## -##static int -##coerce__Int(PyObject **pv, PyObject **pw) -##{ -## if (PyInt_Check(*pw)) { -## Py_INCREF(*pv); -## Py_INCREF(*pw); -## return 0; -## } -## return 1; /* Can't do it */ -##} - -# int__Int is supposed to do nothing, unless it has -# a derived integer object, where it should return -# an exact one. -def int__Int(space, w_int1): - if space.is_true(space.is_(space.type(w_int1), space.w_int)): - return w_int1 - a = w_int1.intval - return W_IntObject(space, a) - -""" -# Not registered -def long__Int(space, w_int1): - a = w_int1.intval - x = long(a) ## XXX should this really be done so? 
- return space.newlong(x) -""" - -def float__Int(space, w_int1): - a = w_int1.intval - x = float(a) - return space.newfloat(x) - -def oct__Int(space, w_int1): - x = w_int1.intval - if x < 0: - ## XXX what about this warning? - #if (PyErr_Warn(PyExc_FutureWarning, - # "hex()/oct() of negative int will return " - # "a signed string in Python 2.4 and up") < 0) - # return NULL; - pass - if x == 0: - ret = "0" - else: - ret = "0%lo" % x - return space.wrap(ret) - -def hex__Int(space, w_int1): - x = w_int1.intval - if x < 0: - ## XXX what about this warning? - #if (PyErr_Warn(PyExc_FutureWarning, - # "hex()/oct() of negative int will return " - # "a signed string in Python 2.4 and up") < 0) - # return NULL; - pass - if x == 0: - ret = "0" - else: - ret = "0x%lx" % x - return space.wrap(ret) - -register_all(vars()) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py deleted file mode 100644 index d843ff7157..0000000000 --- a/pypy/objspace/std/inttype.py +++ /dev/null @@ -1,60 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.strutil import string_to_int -from pypy.interpreter.error import OperationError - -def descr__new__(space, w_inttype, w_value=None, w_base=None): - from pypy.objspace.std.intobject import W_IntObject - if w_base is None: - # check for easy cases - if w_value is None: - value = 0 - elif isinstance(w_value, W_IntObject): - value = w_value.intval - elif space.is_true(space.isinstance(w_value, space.w_str)): - try: - value = string_to_int(space.unwrap(w_value)) - except ValueError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.args[0])) - else: - # otherwise, use the __int__() method - w_obj = space.int(w_value) - # 'int(x)' should return whatever x.__int__() returned - if space.is_true(space.is_(w_inttype, space.w_int)): - return w_obj - value = space.unwrap(w_obj) - if not isinstance(value, (int, long)): # XXX typechecking in unwrap! 
- raise OperationError(space.w_ValueError, - space.wrap("value can't be converted to int")) - else: - base = space.unwrap(w_base) - if not isinstance(base, int): # XXX typechecking in unwrap! - raise OperationError(space.w_TypeError, - space.wrap("an integer is required")) - s = space.unwrap(w_value) - if not isinstance(s, str): # XXX typechecking in unwrap! - raise OperationError(space.w_TypeError, - space.wrap("int() can't convert non-string " - "with explicit base")) - try: - value = string_to_int(s, base) - except ValueError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.args[0])) - - if isinstance(value, long): - # XXX is this right?? - from pypy.objspace.std.longobject import W_LongObject - w_obj = space.allocate_instance(W_LongObject, space.w_long) - w_obj.__init__(space, value) - return w_obj - else: - w_obj = space.allocate_instance(W_IntObject, w_inttype) - w_obj.__init__(space, value) - return w_obj - -# ____________________________________________________________ - -int_typedef = StdTypeDef("int", - __new__ = newmethod(descr__new__), - ) diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py deleted file mode 100644 index 4b832450ec..0000000000 --- a/pypy/objspace/std/iterobject.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Reviewed 03-06-22 -Sequence-iteration is correctly implemented, thoroughly -tested, and complete. The only missing feature is support -for function-iteration. 
-""" -from pypy.objspace.std.objspace import * - - -class W_SeqIterObject(W_Object): - from pypy.objspace.std.itertype import iter_typedef as typedef - - def __init__(w_self, space, w_seq, index=0): - W_Object.__init__(w_self, space) - w_self.w_seq = w_seq - w_self.index = index - - -registerimplementation(W_SeqIterObject) - - -def iter__SeqIter(space, w_seqiter): - return w_seqiter - -def next__SeqIter(space, w_seqiter): - try: - w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index)) - except OperationError, e: - if not e.match(space, space.w_IndexError): - raise - raise OperationError(space.w_StopIteration, space.w_None) - w_seqiter.index += 1 - return w_item - -register_all(vars()) diff --git a/pypy/objspace/std/itertype.py b/pypy/objspace/std/itertype.py deleted file mode 100644 index 677a7b60da..0000000000 --- a/pypy/objspace/std/itertype.py +++ /dev/null @@ -1,6 +0,0 @@ -from pypy.objspace.std.stdtypedef import * - -# ____________________________________________________________ - -iter_typedef = StdTypeDef("sequence-iterator", - ) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py deleted file mode 100644 index 7be664ddd5..0000000000 --- a/pypy/objspace/std/listobject.py +++ /dev/null @@ -1,554 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std.tupleobject import W_TupleObject - -from pypy.objspace.std import slicetype -from pypy.interpreter import gateway -from pypy.objspace.std.restricted_int import r_int, r_uint - - -class W_ListObject(W_Object): - from pypy.objspace.std.listtype import list_typedef as typedef - - def __init__(w_self, space, wrappeditems): - W_Object.__init__(w_self, space) - w_self.ob_item = [] - w_self.ob_size = 0 - newlen = len(wrappeditems) - _list_resize(w_self, newlen) - w_self.ob_size = newlen - items = w_self.ob_item - p = newlen - while p: - p -= 1 - items[p] 
= wrappeditems[p] - - def __repr__(w_self): - """ representation for debugging purposes """ - reprlist = [repr(w_item) for w_item in w_self.ob_item[:w_self.ob_size]] - return "%s(%s)" % (w_self.__class__.__name__, ', '.join(reprlist)) - - -registerimplementation(W_ListObject) - - -def unwrap__List(space, w_list): - items = [space.unwrap(w_item) for w_item in w_list.ob_item[:w_list.ob_size]] - return list(items) - -def init__List(space, w_list, w_args, w_kwds): - if space.is_true(w_kwds): - raise OperationError(space.w_TypeError, - space.wrap("no keyword arguments expected")) - w_list.ob_size = 0 # XXX think about it later - args = space.unpackiterable(w_args) - if len(args) == 0: - pass # empty list - elif len(args) == 1: - w_iterable = args[0] - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - _ins1(w_list, w_list.ob_size, w_item) - else: - raise OperationError(space.w_TypeError, - space.wrap("list() takes at most 1 argument")) - -def len__List(space, w_list): - result = w_list.ob_size - return W_IntObject(space, result) - -def getitem__List_Int(space, w_list, w_index): - items = w_list.ob_item - idx = w_index.intval - if idx < 0: - idx += w_list.ob_size - if idx < 0 or idx >= w_list.ob_size: - raise OperationError(space.w_IndexError, - space.wrap("list index out of range")) - w_item = items[idx] - return w_item - -def getitem__List_Slice(space, w_list, w_slice): - items = w_list.ob_item - length = w_list.ob_size - start, stop, step, slicelength = slicetype.indices4(space, w_slice, length) - assert slicelength >= 0 - w_res = W_ListObject(space, []) - _list_resize(w_res, slicelength) - subitems = w_res.ob_item - for i in range(slicelength): - subitems[i] = items[start] - start += step - w_res.ob_size = slicelength - return w_res - -def iter__List(space, w_list): - from pypy.objspace.std import iterobject - return 
iterobject.W_SeqIterObject(space, w_list) - -def add__List_List(space, w_list1, w_list2): - w_res = W_ListObject(space, []) - newlen = w_list1.ob_size + w_list2.ob_size - _list_resize(w_res, newlen) - p = 0 - items = w_res.ob_item - src = w_list1.ob_item - for i in range(w_list1.ob_size): - items[p] = src[i] - p += 1 - src = w_list2.ob_item - for i in range(w_list2.ob_size): - items[p] = src[i] - p += 1 - w_res.ob_size = p - return w_res - -def inplace_add__List_ANY(space, w_list1, w_iterable2): - list_extend__List_ANY(space, w_list1, w_iterable2) - return w_list1 - -def mul__List_Int(space, w_list, w_int): - w_res = W_ListObject(space, []) - times = w_int.intval - src = w_list.ob_item - size = w_list.ob_size - newlen = size * times # XXX check overflow - _list_resize(w_res, newlen) - items = w_res.ob_item - p = 0 - for _ in range(times): - for i in range(size): - items[p] = src[i] - p += 1 - w_res.ob_size = p - return w_res - -def mul__Int_List(space, w_int, w_list): - return mul__List_Int(space, w_list, w_int) - -def eq__List_List(space, w_list1, w_list2): - items1 = w_list1.ob_item - items2 = w_list2.ob_item - if w_list1.ob_size != w_list2.ob_size: - return space.w_False - for i in range(w_list1.ob_size): - if not space.is_true(space.eq(items1[i], items2[i])): - return space.w_False - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__List_List(space, w_list1, w_list2): - items1 = w_list1.ob_item - items2 = w_list2.ob_item - ncmp = _min(w_list1.ob_size, w_list2.ob_size) - # Search for the first index where items are different - for p in range(ncmp): - if not space.is_true(space.eq(items1[p], items2[p])): - return space.lt(items1[p], items2[p]) - # No more items to compare -- compare sizes - return space.newbool(w_list1.ob_size < w_list2.ob_size) - -def gt__List_List(space, w_list1, w_list2): - items1 = w_list1.ob_item - items2 = w_list2.ob_item - ncmp = _min(w_list1.ob_size, w_list2.ob_size) - # Search for the first index where 
items are different - for p in range(ncmp): - if not space.is_true(space.eq(items1[p], items2[p])): - return space.gt(items1[p], items2[p]) - # No more items to compare -- compare sizes - return space.newbool(w_list1.ob_size > w_list2.ob_size) - -# upto here, lists are nearly identical to tuples, despite the -# fact that we now support over-allocation! - -def delitem__List_Int(space, w_list, w_idx): - i = w_idx.intval - if i < 0: - i += w_list.ob_size - if i < 0 or i >= w_list.ob_size: - raise OperationError(space.w_IndexError, - space.wrap("list deletion index out of range")) - _del_slice(w_list, i, i+1) - return space.w_None - -def delitem__List_Slice(space, w_list, w_slice): - start, stop, step, slicelength = slicetype.indices4(space, w_slice, w_list.ob_size) - if step == 1: - return _setitem_slice_helper(space, w_list, w_slice, [], 0) - - # The current code starts from the top, to simplify - # coding. A later optimization could be to start from - # the bottom, which would reduce the list motion. - # A further later optimization would be to special-case - # a step of -1, because this version will perform a LOT - # of extra motion for this case. Anybody with a real-life - # use-case for this is welcome to write the special case. 
- r = range(start, stop, step) - if step > 0: - r.reverse() - for i in r: - _del_slice(w_list, i, i+1) - return space.w_None - -def setitem__List_Int_ANY(space, w_list, w_index, w_any): - items = w_list.ob_item - idx = w_index.intval - if idx < 0: - idx += w_list.ob_size - if idx < 0 or idx >= w_list.ob_size: - raise OperationError(space.w_IndexError, - space.wrap("list index out of range")) - items[idx] = w_any - return space.w_None - -def setitem__List_Slice_List(space, w_list, w_slice, w_list2): - return _setitem_slice_helper(space, w_list, w_slice, w_list2.ob_item, w_list2.ob_size) - -def setitem__List_Slice_Tuple(space, w_list, w_slice, w_tuple): - t = w_tuple.wrappeditems - return _setitem_slice_helper(space, w_list, w_slice, t, len(t)) - -def _setitem_slice_helper(space, w_list, w_slice, sequence2, len2): - start, stop, step, slicelength = slicetype.indices4(space, w_slice, w_list.ob_size) - assert slicelength >= 0 - - if step == 1: # Support list resizing for non-extended slices - oldsize = w_list.ob_size - delta = len2 - slicelength - newsize = oldsize + delta - _list_resize(w_list, newsize) - w_list.ob_size = newsize - r = range(stop+delta, newsize) - if delta > 0: - r.reverse() - items = w_list.ob_item - for i in r: - items[i] = items[i-delta] - elif len2 != slicelength: # No resize for extended slices - raise OperationError(space.w_ValueError, space.wrap("attempt to " - "assign sequence of size %d to extended slice of size %d" % - (len2,slicelength))) - - r = range(len2) - items = w_list.ob_item - if sequence2 is items: - if step > 0: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. 
- r.reverse() - else: - # Make a shallow copy to more easily handle the reversal case - sequence2 = list(sequence2) - for i in r: - items[start+i*step] = sequence2[i] - return space.w_None - -def app_repr__List(l): - return "[" + ", ".join([repr(x) for x in l]) + ']' - -def hash__List(space,w_list): - raise OperationError(space.w_TypeError,space.wrap("list objects are unhashable")) - -# adapted C code -def _roundupsize(n): - nbits = r_uint(0) - n2 = n >> 5 - -## /* Round up: -## * If n < 256, to a multiple of 8. -## * If n < 2048, to a multiple of 64. -## * If n < 16384, to a multiple of 512. -## * If n < 131072, to a multiple of 4096. -## * If n < 1048576, to a multiple of 32768. -## * If n < 8388608, to a multiple of 262144. -## * If n < 67108864, to a multiple of 2097152. -## * If n < 536870912, to a multiple of 16777216. -## * ... -## * If n < 2**(5+3*i), to a multiple of 2**(3*i). -## * -## * This over-allocates proportional to the list size, making room -## * for additional growth. The over-allocation is mild, but is -## * enough to give linear-time amortized behavior over a long -## * sequence of appends() in the presence of a poorly-performing -## * system realloc() (which is a reality, e.g., across all flavors -## * of Windows, with Win9x behavior being particularly bad -- and -## * we've still got address space fragmentation problems on Win9x -## * even with this scheme, although it requires much longer lists to -## * provoke them than it used to). -## */ - while 1: - n2 >>= 3 - nbits += 3 - if not n2 : - break - return ((n >> nbits) + 1) << nbits - -# before we have real arrays, -# we use lists, allocated to fixed size. -# XXX memory overflow is ignored here. -# See listobject.c for reference. 
- -for_later = """ -#define NRESIZE(var, type, nitems) \ -do { \ - size_t _new_size = _roundupsize(nitems); \ - if (_new_size <= ((~(size_t)0) / sizeof(type))) \ - PyMem_RESIZE(var, type, _new_size); \ - else \ - var = NULL; \ -} while (0) -""" - -def _list_resize(w_list, newlen): - if newlen > len(w_list.ob_item): - true_size = _roundupsize(newlen) - old_items = w_list.ob_item - w_list.ob_item = items = [None] * true_size - for p in range(len(old_items)): - items[p] = old_items[p] - -def _ins1(w_list, where, w_any): - _list_resize(w_list, w_list.ob_size+1) - size = w_list.ob_size - items = w_list.ob_item - if where < 0: - where += size - if where < 0: - where = 0 - if (where > size): - where = size - for i in range(size, where, -1): - items[i] = items[i-1] - items[where] = w_any - w_list.ob_size += 1 - -def list_insert__List_Int_ANY(space, w_list, w_where, w_any): - _ins1(w_list, w_where.intval, w_any) - return space.w_None - -def list_append__List_ANY(space, w_list, w_any): - _ins1(w_list, w_list.ob_size, w_any) - return space.w_None - -def list_extend__List_ANY(space, w_list, w_any): - lis = space.unpackiterable(w_any) - newlen = w_list.ob_size + len(lis) - _list_resize(w_list, newlen) - d = w_list.ob_size - items = w_list.ob_item - for i in range(len(lis)): - items[d+i] = lis[i] - w_list.ob_size = newlen - return space.w_None - -def _del_slice(w_list, ilow, ihigh): - """ similar to the deletion part of list_ass_slice in CPython """ - if ilow < 0: - ilow = 0 - elif ilow > w_list.ob_size: - ilow = w_list.ob_size - if ihigh < ilow: - ihigh = ilow - elif ihigh > w_list.ob_size: - ihigh = w_list.ob_size - items = w_list.ob_item - d = ihigh-ilow - # XXX this is done by CPython to hold the elements - # to be deleted. I have no idea how to express - # this here, but we need to be aware when we write - # a compiler. 
- # recycle = [items[i] for i in range(ilow, ihigh)] - for i in range(ilow, w_list.ob_size - d): - items[i] = items[i+d] - items[i+d] = None - w_list.ob_size -= d - -# note that the default value will come back wrapped!!! -def list_pop__List_Int(space, w_list, w_idx=-1): - if w_list.ob_size == 0: - raise OperationError(space.w_IndexError, - space.wrap("pop from empty list")) - i = w_idx.intval - if i < 0: - i += w_list.ob_size - if i < 0 or i >= w_list.ob_size: - raise OperationError(space.w_IndexError, - space.wrap("pop index out of range")) - w_res = w_list.ob_item[i] - _del_slice(w_list, i, i+1) - return w_res - -def list_remove__List_ANY(space, w_list, w_any): - eq = space.eq - items = w_list.ob_item - for i in range(w_list.ob_size): - cmp = eq(items[i], w_any) - if space.is_true(cmp): - _del_slice(w_list, i, i+1) - return space.w_None - raise OperationError(space.w_ValueError, - space.wrap("list.remove(x): x not in list")) - -def list_index__List_ANY_ANY_ANY(space, w_list, w_any, w_start, w_stop): - eq = space.eq - items = w_list.ob_item - size = w_list.ob_size - start = space.unwrap(w_start) # XXX type check: int or clamped long - if start < 0: - start += size - start = min(max(0,start),size) - stop = space.unwrap(w_stop) # XXX type check: int or clamped long - if stop < 0: - stop += size - stop = min(max(start,stop),size) - - for i in range(start,stop): - cmp = eq(items[i], w_any) - if space.is_true(cmp): - return space.wrap(i) - raise OperationError(space.w_ValueError, - space.wrap("list.index(x): x not in list")) - -def list_count__List_ANY(space, w_list, w_any): - eq = space.eq - items = w_list.ob_item - count = r_int(0) - for i in range(w_list.ob_size): - cmp = eq(items[i], w_any) - if space.is_true(cmp): - count += 1 - return space.wrap(count) - -# Reverse a slice of a list in place, from lo up to (exclusive) hi. 
-# (also used in sort, later) - -def _reverse_slice(lis, lo, hi): - hi -= 1 - while lo < hi: - t = lis[lo] - lis[lo] = lis[hi] - lis[hi] = t - lo += 1 - hi -= 1 - -def list_reverse__List(space, w_list): - if w_list.ob_size > 1: - _reverse_slice(w_list.ob_item, 0, w_list.ob_size) - return space.w_None - - - -# Python Quicksort Written by Magnus Lie Hetland -# http://www.hetland.org/python/quicksort.html - -# NOTE: we cannot yet detect that a user comparision -# function modifies the list in-place. The -# CPython sort() should be studied to learn how -# to implement this functionality. - -def _partition(list, start, end, lt): - pivot = list[end] # Partition around the last value - bottom = start-1 # Start outside the area to be partitioned - top = end # Ditto - - done = 0 - while not done: # Until all elements are partitioned... - - while not done: # Until we find an out of place element... - bottom = bottom+1 # ... move the bottom up. - - if bottom == top: # If we hit the top... - done = 1 # ... we are done. - break - - if lt(pivot, list[bottom]): # Is the bottom out of place? - list[top] = list[bottom] # Then put it at the top... - break # ... and start searching from the top. - - while not done: # Until we find an out of place element... - top = top-1 # ... move the top down. - - if top == bottom: # If we hit the bottom... - done = 1 # ... we are done. - break - - if lt(list[top], pivot): # Is the top out of place? - list[bottom] = list[top] # Then put it at the bottom... - break # ...and start searching from the bottom. - - list[top] = pivot # Put the pivot in its place. - return top # Return the split point - - -def _quicksort(list, start, end, lt): - if start < end: # If there are two or more elements... - split = _partition(list, start, end, lt) # ... partition the sublist... - _quicksort(list, start, split-1, lt) # ... and sort both halves. 
- _quicksort(list, split+1, end, lt) - -class Comparer: - """Just a dumb container class for a space and a w_cmp, because - we can't use nested scopes for that in RPython. - """ - def __init__(self, space, w_cmp): - self.space = space - self.w_cmp = w_cmp - - def simple_lt(self, a, b): - space = self.space - return space.is_true(space.lt(a, b)) - - def complex_lt(self, a, b): - space = self.space - w_cmp = self.w_cmp - result = space.unwrap(space.call_function(w_cmp, a, b)) - if not isinstance(result,int): - raise OperationError(space.w_TypeError, - space.wrap("comparison function must return int")) - return result < 0 - -def list_sort__List_ANY(space, w_list, w_cmp): - comparer = Comparer(space, w_cmp) - if w_cmp is space.w_None: - lt = comparer.simple_lt - else: - lt = comparer.complex_lt - - # XXX Basic quicksort implementation - # XXX this is not stable !! - _quicksort(w_list.ob_item, 0, w_list.ob_size-1, lt) - return space.w_None - - -""" -static PyMethodDef list_methods[] = { - {"append", (PyCFunction)listappend, METH_O, append_doc}, - {"insert", (PyCFunction)listinsert, METH_VARARGS, insert_doc}, - {"extend", (PyCFunction)listextend, METH_O, extend_doc}, - {"pop", (PyCFunction)listpop, METH_VARARGS, pop_doc}, - {"remove", (PyCFunction)listremove, METH_O, remove_doc}, - {"index", (PyCFunction)listindex, METH_O, index_doc}, - {"count", (PyCFunction)listcount, METH_O, count_doc}, - {"reverse", (PyCFunction)listreverse, METH_NOARGS, reverse_doc}, - {"sort", (PyCFunction)listsort, METH_VARARGS, sort_doc}, - {NULL, NULL} /* sentinel */ -}; -""" - -from pypy.interpreter import gateway -gateway.importall(globals()) -from pypy.objspace.std import listtype -register_all(vars(), listtype) diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py deleted file mode 100644 index 8ec7327053..0000000000 --- a/pypy/objspace/std/listtype.py +++ /dev/null @@ -1,27 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from sys import maxint - -list_append = 
MultiMethod('append', 2) -list_insert = MultiMethod('insert', 3) -list_extend = MultiMethod('extend', 2) -list_pop = MultiMethod('pop', 2, defaults=(-1,)) -list_remove = MultiMethod('remove', 2) -list_index = MultiMethod('index', 4, defaults=(0,maxint)) -list_count = MultiMethod('count', 2) -list_reverse= MultiMethod('reverse',1) -list_sort = MultiMethod('sort', 2, defaults=(None,)) - -# ____________________________________________________________ - -def descr__new__(space, w_listtype, __args__): - from pypy.objspace.std.listobject import W_ListObject - w_obj = space.allocate_instance(W_ListObject, w_listtype) - w_obj.__init__(space, []) - return w_obj - -# ____________________________________________________________ - -list_typedef = StdTypeDef("list", - __new__ = newmethod(descr__new__), - ) -list_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py deleted file mode 100644 index 3eff5e8c2d..0000000000 --- a/pypy/objspace/std/longobject.py +++ /dev/null @@ -1,236 +0,0 @@ -import sys -from pypy.objspace.std.objspace import * -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.noneobject import W_NoneObject - -class W_LongObject(W_Object): - """This is a non-reimplementation of longs. - It uses real CPython longs. - XXX we must really choose another representation (e.g. list of ints) - XXX and implement it in detail. - """ - from pypy.objspace.std.longtype import long_typedef as typedef - - def __init__(w_self, space, longval=0L): - W_Object.__init__(w_self, space) - assert isinstance(longval, long) - w_self.longval = longval - -registerimplementation(W_LongObject) - - -# long__Long is supposed to do nothing, unless it has -# a derived long object, where it should return -# an exact one. 
-def long__Long(space, w_long1): - if space.is_true(space.is_(space.type(w_long1), space.w_long)): - return w_long1 - a = w_long1.longval - return W_LongObject(space, a) - -def long__Int(space, w_intobj): - return W_LongObject(space, long(w_intobj.intval)) - -def int__Long(space, w_value): - if -sys.maxint-1 <= w_value.longval <= sys.maxint: - return space.newint(int(w_value.longval)) - else: - return w_value # 9999999999999L.__int__() == 9999999999999L - -def float__Long(space, w_longobj): - return space.newfloat(float(w_longobj.longval)) - -def long__Float(space, w_floatobj): - return W_LongObject(space, long(w_floatobj.floatval)) - -def unwrap__Long(space, w_long): - return w_long.longval - -def repr__Long(space, w_long): - return space.wrap(repr(w_long.longval)) - -def str__Long(space, w_long): - return space.wrap(str(w_long.longval)) - -def eq__Long_Long(space, w_long1, w_long2): - i = w_long1.longval - j = w_long2.longval - return space.newbool( i == j ) - -def lt__Long_Long(space, w_long1, w_long2): - i = w_long1.longval - j = w_long2.longval - return space.newbool( i < j ) - -def hash__Long(space,w_value): - ## %reimplement% - # real Implementation should be taken from _Py_HashDouble in object.c - return space.wrap(hash(w_value.longval)) - -def add__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - z = x + y - return W_LongObject(space, z) - -def sub__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - z = x - y - return W_LongObject(space, z) - -def mul__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - z = x * y - return W_LongObject(space, z) - -def div__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - if not y: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("long division")) - z = x / y - return W_LongObject(space, z) - -def floordiv__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval 
- if not y: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("long division")) - z = x // y - return W_LongObject(space, z) - -def mod__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - if not y: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("long modulo")) - z = x % y - return W_LongObject(space, z) - -def divmod__Long_Long(space, w_long1, w_long2): - x = w_long1.longval - y = w_long2.longval - if not y: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("long modulo")) - z1, z2 = divmod(x, y) - return space.newtuple([W_LongObject(space, z1), - W_LongObject(space, z2)]) - -def pow__Long_Long_None(space, w_long1, w_long2, w_none3): - x = w_long1.longval - y = w_long2.longval - if y < 0: - return space.pow(space.float(w_long1), - space.float(w_long2), - space.w_None) - z = x ** y - return W_LongObject(space, z) - -def pow__Long_Long_Long(space, w_long1, w_long2, w_long3): - x = w_long1.longval - y = w_long2.longval - z = w_long2.longval - t = pow(x, y, z) - return W_LongObject(space, t) - -def neg__Long(space, w_long1): - return W_LongObject(space, -w_long1.longval) - -def pos__Long(space, w_long): - return long__Long(space, w_long) - -def abs__Long(space, w_long): - return W_LongObject(space, abs(w_long.longval)) - -def nonzero__Long(space, w_long): - return space.newbool(w_long.longval != 0L) - -def invert__Long(space, w_long): - return W_LongObject(space, ~w_long.longval) - -def lshift__Long_Int(space, w_long1, w_int2): - a = w_long1.longval - b = w_int2.intval - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - res = a << b - return W_LongObject(space, res) - -def rshift__Long_Int(space, w_long1, w_int2): - a = w_long1.longval - b = w_int2.intval - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - res = a >> b - return W_LongObject(space, res) - -def and__Long_Long(space, w_long1, w_long2): - a = 
w_long1.longval - b = w_long2.longval - res = a & b - return W_LongObject(space, res) - -def xor__Long_Long(space, w_long1, w_long2): - a = w_long1.longval - b = w_long2.longval - res = a ^ b - return W_LongObject(space, res) - -def or__Long_Long(space, w_long1, w_long2): - a = w_long1.longval - b = w_long2.longval - res = a | b - return W_LongObject(space, res) - -def oct__Long(space, w_long1): - x = w_long1.longval - return space.wrap(oct(x)) - -def hex__Long(space, w_long1): - x = w_long1.longval - return space.wrap(hex(x)) - - -register_all(vars()) - -# delegations must be registered manually because we have more than one -# long-to-something delegation - -# int-to-long delegation -def delegate_from_int(space, w_intobj): - return W_LongObject(space, long(w_intobj.intval)) -delegate_from_int.result_class = W_LongObject -delegate_from_int.priority = PRIORITY_CHANGE_TYPE - -StdObjSpace.delegate.register(delegate_from_int, W_IntObject) - -# long-to-int delegation -def delegate_to_int(space, w_longobj): - if -sys.maxint-1 <= w_longobj.longval <= sys.maxint: - return W_IntObject(space, int(w_longobj.longval)) - else: - # note the 'return' here -- hack - return FailedToImplement(space.w_OverflowError, - space.wrap("long int too large to convert to int")) -delegate_to_int.result_class = W_IntObject -delegate_to_int.priority = PRIORITY_CHANGE_TYPE -delegate_to_int.can_fail = True - -StdObjSpace.delegate.register(delegate_to_int, W_LongObject) - -# long-to-float delegation -def delegate_to_float(space, w_longobj): - return W_FloatObject(space, float(w_longobj.longval)) -delegate_to_float.result_class = W_FloatObject -delegate_to_float.priority = PRIORITY_CHANGE_TYPE - -StdObjSpace.delegate.register(delegate_to_float, W_LongObject) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py deleted file mode 100644 index aad2acb463..0000000000 --- a/pypy/objspace/std/longtype.py +++ /dev/null @@ -1,59 +0,0 @@ -from pypy.objspace.std.stdtypedef import * 
-from pypy.objspace.std.strutil import string_to_long -from pypy.interpreter.error import OperationError -from pypy.objspace.std.inttype import int_typedef - -def descr__new__(space, w_longtype, w_value=None, w_base=None): - from pypy.objspace.std.longobject import W_LongObject - if w_base is None: - # check for easy cases - if w_value is None: - value = 0L - elif isinstance(w_value, W_LongObject): - value = w_value.longval - elif space.is_true(space.isinstance(w_value, space.w_str)): - try: - value = string_to_long(space.unwrap(w_value)) - except ValueError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.args[0])) - else: - # otherwise, use the __long__() method - w_obj = space.long(w_value) - # 'long(x)' should return whatever x.__long__() returned - if space.is_true(space.is_(w_longtype, space.w_long)): - return w_obj - value = space.unwrap(w_obj) - if isinstance(value, int): # XXX typechecking in unwrap! - value = long(value) - if not isinstance(value, long): - raise OperationError(space.w_ValueError, - space.wrap("value can't be converted to long")) - else: - base = space.unwrap(w_base) - if not isinstance(base, int): # XXX typechecking in unwrap! - raise OperationError(space.w_TypeError, - space.wrap("an integer is required")) - s = space.unwrap(w_value) - if not isinstance(s, str): # XXX typechecking in unwrap! 
- raise OperationError(space.w_TypeError, - space.wrap("long() can't convert non-string " - "with explicit base")) - try: - value = string_to_long(s, base) - except ValueError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.args[0])) - - w_obj = space.allocate_instance(W_LongObject, w_longtype) - w_obj.__init__(space, value) - return w_obj - -# ____________________________________________________________ - -long_typedef = StdTypeDef("long", - __new__ = newmethod(descr__new__), - ) -# hack to allow automatic int to long conversion: the int.__xyz__ methods -# will fall back to their long.__xyz__ counterparts if they fail -long_typedef.could_also_match = int_typedef diff --git a/pypy/objspace/std/mro.py b/pypy/objspace/std/mro.py deleted file mode 100644 index 03b9c5fd9d..0000000000 --- a/pypy/objspace/std/mro.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -For reference, the MRO algorithm of Python 2.3. -""" - - - -def mro(cls): - order = [] - orderlists = [mro(base) for base in cls.__bases__] - orderlists.append([cls] + list(cls.__bases__)) - while orderlists: - for candidatelist in orderlists: - candidate = candidatelist[0] - if blockinglist(candidate, orderlists) is None: - break # good candidate - else: - mro_error(orderlists) # no candidate found - assert candidate not in order - order.append(candidate) - for i in range(len(orderlists)-1, -1, -1): - if orderlists[i][0] == candidate: - del orderlists[i][0] - if len(orderlists[i]) == 0: - del orderlists[i] - return order - -def blockinglist(candidate, orderlists): - for lst in orderlists: - if candidate in lst[1:]: - return lst - return None # good candidate - -def mro_error(orderlists): - cycle = [] - candidate = orderlists[0][0] - while candidate not in cycle: - cycle.append(candidate) - nextblockinglist = blockinglist(candidate, orderlists) - candidate = nextblockinglist[0] - del cycle[:cycle.index(candidate)] - cycle.append(candidate) - cycle.reverse() - names = [cls.__name__ for cls in cycle] - raise 
TypeError, "Cycle among base classes: " + ' < '.join(names) - - -def mronames(cls): - names = [cls.__name__ for cls in mro(cls)] - return names - - -if __name__ == '__main__': - class ex_9: - #O = object - class O: pass - class A(O): pass - class B(O): pass - class C(O): pass - class D(O): pass - class E(O): pass - class F(O): pass - class K1(A,B,C): pass - class K2(D,F,B,E): pass - class K3(D,A): pass - class Z(K1,K2,F,K3): pass - class ZM(K1,K2,K3): pass - #print ZM.__mro__ - - print 'K1:', mronames(ex_9.K1) - print 'K2:', mronames(ex_9.K2) - print 'K3:', mronames(ex_9.K3) - print mronames(ex_9.ZM) - print mronames(ex_9.Z) diff --git a/pypy/objspace/std/multimethod.py b/pypy/objspace/std/multimethod.py deleted file mode 100644 index 04a35e1e05..0000000000 --- a/pypy/objspace/std/multimethod.py +++ /dev/null @@ -1,507 +0,0 @@ -from pypy.interpreter.baseobjspace import OperationError -from pypy.tool.cache import Cache - -class FailedToImplement(Exception): - "Signals the dispatcher to try harder." - -class W_ANY: - "Catch-all in case multimethods don't find anything else." - typedef = None - - -# This file defines three major classes: -# -# MultiMethod is the class you instantiate explicitly. -# It is essentially just a collection of registered functions. -# If xxx is a MultiMethod, StdObjSpace.xxx is xxx again, -# and space.xxx is a BoundMultiMethod. -# -# UnboundMultiMethod is a MultiMethod on which one argument -# (typically the first one) has been restricted to be of some -# statically known type. It is obtained by the syntax -# W_XxxType.yyy, where W_XxxType is the static type class, -# or explicitly by calling 'xxx.slice(typeclass, arg_position)'. -# Its dispatch table is always a subset of the original MultiMethod's -# dispatch table. -# -# The registration of a new function to a MultiMethod will be propagated -# to all of its matching UnboundMultiMethod instances. 
The registration of -# a function directly to an UnboundMultiMethod will register the function -# to its base MultiMethod and invoke the same behavior. -# -# BoundMultiMethod is a MultiMethod or UnboundMultiMethod which -# has been bound to a specific object space. It is obtained by -# 'space.xxx' or explicitly by calling 'xxx.get(space)'. - -class AbstractMultiMethod(object): - """Abstract base class for MultiMethod and UnboundMultiMethod - i.e. the classes that are not bound to a specific space instance.""" - - def __init__(self, operatorsymbol, arity): - "NOT_RPYTHON: cannot create new multimethods dynamically" - self.arity = arity - self.operatorsymbol = operatorsymbol - self.dispatch_table = {} - self.cache_table = Cache() - self.compilation_cache_table = {} - self.cache_delegator_key = None - self.dispatch_arity = 0 - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, self.operatorsymbol) - - def register(self, function, *types): - """NOT_RPYTHON: cannot register new multimethod - implementations dynamically""" - assert len(types) == self.arity - functions = self.dispatch_table.setdefault(types, []) - if function in functions: - return False - functions.append(function) - self.cache_table.clear() - self.compilation_cache_table.clear() - self.adjust_dispatch_arity(types) - return True - - def adjust_dispatch_arity(self, types): - "NOT_RPYTHON" - width = len(types) - while width > self.dispatch_arity and types[width-1] is W_ANY: - width -= 1 - self.dispatch_arity = width - - def compile_calllist(self, argclasses, delegate): - """Compile a list of calls to try for the given classes of the - arguments. 
Return a function that should be called with the - actual arguments.""" - if delegate.key is not self.cache_delegator_key: - self.cache_table.clear() - self.compilation_cache_table.clear() - self.cache_delegator_key = delegate.key - return self.cache_table.getorbuild(argclasses, - self.do_compile_calllist, - delegate) - - def do_compile_calllist(self, argclasses, delegate): - "NOT_RPYTHON" - calllist = [] - self.internal_buildcalllist(argclasses, delegate, calllist) - calllist = tuple(calllist) - try: - result = self.compilation_cache_table[calllist] - except KeyError: - result = self.internal_compilecalllist(calllist) - self.compilation_cache_table[calllist] = result - return result - - def internal_compilecalllist(self, calllist): - "NOT_RPYTHON" - source, glob = self.internal_sourcecalllist(calllist) - # compile the function - exec source in glob - return glob['do'] - - def internal_sourcecalllist(self, calllist): - """NOT_RPYTHON - Translate a call list into the source of a Python function - which is optimized and doesn't do repeated conversions on the - same arguments.""" - if len(calllist) == 1: - fn, conversions = calllist[0] - if conversions == ((),) * len(conversions): - # no conversion, just calling a single function: return - # that function directly - return '', {'do': fn} - - #print '**** compile **** ', self.operatorsymbol, [ - # t.__name__ for t in argclasses] - arglist = ['a%d'%i for i in range(self.dispatch_arity)] + ['*extraargs'] - source = ['def do(space,%s):' % ','.join(arglist)] - converted = [{(): ('a%d'%i, False)} for i in range(self.dispatch_arity)] - - def make_conversion(argi, convtuple): - if convtuple in converted[argi]: - return converted[argi][convtuple] - else: - prev, can_fail = make_conversion(argi, convtuple[:-1]) - new = '%s_%d' % (prev, len(converted[argi])) - fname = all_functions.setdefault(convtuple[-1], - 'd%d' % len(all_functions)) - if can_fail: - source.append(' if isinstance(%s, FailedToImplement):' % prev) - 
source.append(' %s = %s' % (new, prev)) - source.append(' else:') - indent = ' ' - else: - indent = ' ' - source.append('%s%s = %s(space,%s)' % (indent, new, fname, prev)) - can_fail = can_fail or getattr(convtuple[-1], 'can_fail', False) - converted[argi][convtuple] = new, can_fail - return new, can_fail - - all_functions = {} - has_firstfailure = False - for fn, conversions in calllist: - # make the required conversions - fname = all_functions.setdefault(fn, 'f%d' % len(all_functions)) - arglist = [] - failcheck = [] - for i in range(self.dispatch_arity): - argname, can_fail = make_conversion(i, conversions[i]) - arglist.append(argname) - if can_fail: - failcheck.append(argname) - arglist.append('*extraargs') - source.append( ' try:') - for argname in failcheck: - source.append(' if isinstance(%s, FailedToImplement):' % argname) - source.append(' raise %s' % argname) - source.append( ' return %s(space,%s)' % ( - fname, ','.join(arglist))) - if has_firstfailure: - source.append(' except FailedToImplement:') - else: - source.append(' except FailedToImplement, firstfailure:') - has_firstfailure = True - source.append( ' pass') - - # complete exhaustion - if has_firstfailure: - source.append(' raise firstfailure') - else: - source.append(' raise FailedToImplement()') - source.append('') - - glob = {'FailedToImplement': FailedToImplement} - for fn, fname in all_functions.items(): - glob[fname] = fn - return '\n'.join(source), glob - - def internal_buildcalllist(self, argclasses, delegate, calllist): - """NOT_RPYTHON - Build a list of calls to try for the given classes of the - arguments. 
The list contains 'calls' of the following form: - (function-to-call, list-of-list-of-converters) - The list of converters contains a list of converter functions per - argument, with [] meaning that no conversion is needed for - that argument.""" - # look for an exact match first - arity = self.dispatch_arity - assert arity == len(argclasses) - dispatchclasses = tuple([(c,) for c in argclasses]) - choicelist = self.buildchoices(dispatchclasses) - seen_functions = {} - no_conversion = [()] * arity - for signature, function in choicelist: - calllist.append((function, tuple(no_conversion))) - seen_functions[function] = 1 - - # proceed by expanding the last argument by delegation, step by step - # until no longer possible, and then the previous argument, and so on. - expanded_args = () - expanded_dispcls = () - - for argi in range(arity-1, -1, -1): - # growing tuple of dispatch classes we can delegate to - curdispcls = dispatchclasses[argi] - assert len(curdispcls) == 1 - # maps each dispatch class to a list of converters - curargs = {curdispcls[0]: []} - # reduce dispatchclasses to the arguments before this one - # (on which no delegation has been tried yet) - dispatchclasses = dispatchclasses[:argi] - no_conversion = no_conversion[:argi] - - while 1: - choicelist = delegate.buildchoices((curdispcls,)) - # the list is sorted by priority - progress = False - for (t,), function in choicelist: - if function is None: - # this marks a decrease in the priority. - # Don't try delegators with lower priority if - # we have already progressed. 
- if progress: - break - else: - assert hasattr(function, 'result_class'), ( - "delegator %r must have a result_class" % function) - nt = function.result_class - if nt not in curargs: - curdispcls += (nt,) - srcconvs = curargs[t] - if not getattr(function, 'trivial_delegation',False): - srcconvs = srcconvs + [function] - curargs[nt] = srcconvs - progress = True - else: - if not progress: - break # no progress, and delegators list exhausted - - # progress: try again to dispatch with this new set of types - choicelist = self.buildchoices( - dispatchclasses + (curdispcls,) + expanded_dispcls) - for signature, function in choicelist: - if function not in seen_functions: - seen_functions[function] = 1 - # collect arguments: arguments after position argi... - after_argi = [tuple(expanded_args[j][signature[j]]) - for j in range(argi+1-arity, 0)] # nb. j<0 - # collect arguments: argument argi... - arg_argi = tuple(curargs[signature[argi]]) - # collect all arguments - newargs = no_conversion + [arg_argi] + after_argi - # record the call - calllist.append((function, tuple(newargs))) - # end of while 1: try on delegating the same argument i - - # proceed to the next argument - expanded_args = (curargs,) + expanded_args - expanded_dispcls = (curdispcls,) + expanded_dispcls - - def buildchoices(self, allowedtypes): - """NOT_RPYTHON - Build a list of all possible implementations we can dispatch to, - sorted best-first, ignoring delegation.""" - # 'types' is a tuple of tuples of classes, one tuple of classes per - # argument. (After delegation, we need to call buildchoice() with - # more than one possible class for a single argument.) - result = [] - self.internal_buildchoices(allowedtypes, (), result) - self.postprocessresult(allowedtypes, result) - #print self.operatorsymbol, allowedtypes, result - # the result is a list a tuples (function, signature). 
- return result - - def postprocessresult(self, allowedtypes, result): - "NOT_RPYTHON" - - def internal_buildchoices(self, initialtypes, currenttypes, result): - "NOT_RPYTHON" - if len(currenttypes) == self.dispatch_arity: - currenttypes += (W_ANY,) * (self.arity - self.dispatch_arity) - for func in self.dispatch_table.get(currenttypes, []): - if func not in result: # ignore duplicates - result.append((currenttypes, func)) - else: - classtuple = initialtypes[len(currenttypes)] - for nexttype in classtuple: - self.internal_buildchoices(initialtypes, - currenttypes + (nexttype,), - result) - - def is_empty(self): - return not self.dispatch_table - - -class MultiMethod(AbstractMultiMethod): - - def __init__(self, operatorsymbol, arity, specialnames=None, **extras): - """NOT_RPYTHON: cannot create new multimethods dynamically. - MultiMethod dispatching on the first 'arity' arguments. - """ - AbstractMultiMethod.__init__(self, operatorsymbol, arity) - if arity < 1: - raise ValueError, "multimethods cannot dispatch on nothing" - if specialnames is None: - specialnames = [operatorsymbol] - self.specialnames = specialnames # e.g. 
['__xxx__', '__rxxx__'] - self.extras = extras - self.unbound_versions = {} - - - def __get__(self, space, cls=None): - if space is None: - return self - else: - return BoundMultiMethod(space, self) - - get = __get__ - - def slice(self, typeclass, bound_position=0): - "NOT_RPYTHON" - try: - return self.unbound_versions[typeclass, bound_position] - except KeyError: - m = UnboundMultiMethod(self, typeclass, bound_position) - self.unbound_versions[typeclass, bound_position] = m - return m - - def register(self, function, *types): - """NOT_RPYTHON: cannot register new multimethod - implementations dynamically""" - if not AbstractMultiMethod.register(self, function, *types): - return False - # register the function into unbound versions that match - for m in self.unbound_versions.values(): - if m.match(types): - AbstractMultiMethod.register(m, function, *types) - return True - - -class DelegateMultiMethod(MultiMethod): - - def __init__(self): - "NOT_RPYTHON: cannot create new multimethods dynamically." 
- MultiMethod.__init__(self, 'delegate', 1, []) - self.key = object() - - def register(self, function, *types): - """NOT_RPYTHON: cannot register new multimethod - implementations dynamically""" - if not AbstractMultiMethod.register(self, function, *types): - return False - self.key = object() # change the key to force recomputation - return True - - def postprocessresult(self, allowedtypes, result): - "NOT_RPYTHON" - # add delegation from a class to the *first* immediate parent class - # and to W_ANY - arg1types, = allowedtypes - for t in arg1types: - if t.__bases__: - base = t.__bases__[0] - def delegate_to_parent_class(space, a): - return a - delegate_to_parent_class.trivial_delegation = True - delegate_to_parent_class.result_class = base - delegate_to_parent_class.priority = 0 - # hard-wire it at priority 0 - result.append(((t,), delegate_to_parent_class)) - - def delegate_to_any(space, a): - return a - delegate_to_any.trivial_delegation = True - delegate_to_any.result_class = W_ANY - delegate_to_any.priority = -999 - # hard-wire it at priority -999 - result.append(((t,), delegate_to_any)) - - # sort the results in priority order, and insert None marks - # between jumps in the priority values. Higher priority values - # first. - by_priority = {} # classify delegators by priority - for signature, function in result: - assert hasattr(function, 'priority'), ( - "delegator %r must have a priority" % function) - sublist = by_priority.setdefault(function.priority, []) - sublist.append((signature, function)) - delegators = by_priority.items() - delegators.sort() - delegators.reverse() - del result[:] - for priority, sublist in delegators: - if result: - result.append(((None,), None)) - result += sublist - - -class UnboundMultiMethod(AbstractMultiMethod): - - def __init__(self, basemultimethod, typeclass, bound_position=0): - "NOT_RPYTHON: cannot create new multimethods dynamically." 
- AbstractMultiMethod.__init__(self, - basemultimethod.operatorsymbol, - basemultimethod.arity) - self.basemultimethod = basemultimethod - self.typeclass = typeclass - self.bound_position = bound_position - # get all the already-registered matching functions from parent - for types, functions in basemultimethod.dispatch_table.iteritems(): - if self.match(types): - self.dispatch_table[types] = functions - self.adjust_dispatch_arity(types) - #print basemultimethod.operatorsymbol, typeclass, self.dispatch_table - - def register(self, function, *types): - """NOT_RPYTHON: cannot register new multimethod - implementations dynamically""" - if not AbstractMultiMethod.register(self, function, *types): - return False - # propagate the function registeration to the base multimethod - # and possibly other UnboundMultiMethods - self.basemultimethod.register(function, *types) - return True - - def match(self, types): - "NOT_RPYTHON" - # check if the 'types' signature statically corresponds to the - # restriction of the present UnboundMultiMethod. - # Only accept an exact match; having merely subclass should - # be taken care of by the general look-up rules. 
- t = types[self.bound_position].typedef - return t is self.typeclass or ( - getattr(t, 'could_also_match', None) is self.typeclass) - - def __get__(self, space, cls=None): - if space is None: - return self - else: - return BoundMultiMethod(space, self) - - get = __get__ - - -class BoundMultiMethod: - ASSERT_BASE_TYPE = object - - def __init__(self, space, multimethod): - self.space = space - self.multimethod = multimethod - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.space == other.space and - self.multimethod == other.multimethod) - - def __ne__(self, other): - return self != other - - def __hash__(self): - return hash((self.__class__, self.space, self.multimethod)) - - def __call__(self, *args): - if len(args) < self.multimethod.arity: - raise TypeError, ("multimethod needs at least %d arguments" % - self.multimethod.arity) - try: - return self.perform_call(*args) - except FailedToImplement, e: - if e.args: - raise OperationError(e.args[0], e.args[1]) - else: - # raise a TypeError for a FailedToImplement - initialtypes = [a.__class__ - for a in args[:self.multimethod.dispatch_arity]] - if len(initialtypes) <= 1: - plural = "" - else: - plural = "s" - debugtypenames = [t.__name__ for t in initialtypes] - message = "unsupported operand type%s for %s (%s)" % ( - plural, self.multimethod.operatorsymbol, - ', '.join(debugtypenames)) - w_value = self.space.wrap(message) - raise OperationError(self.space.w_TypeError, w_value) - - def perform_call(self, *args): - for a in args: - assert isinstance(a, self.ASSERT_BASE_TYPE), ( - "'%s' multimethod got a non-wrapped argument: %r" % ( - self.multimethod.operatorsymbol, a)) - arity = self.multimethod.dispatch_arity - argclasses = tuple([a.__class__ for a in args[:arity]]) - delegate = self.space.delegate.multimethod - fn = self.multimethod.compile_calllist(argclasses, delegate) - return fn(self.space, *args) - - def is_empty(self): - return self.multimethod.is_empty() - - def 
__repr__(self): - return '<Bound %r>'%(self.multimethod,) - - -class error(Exception): - "Thrown to you when you do something wrong with multimethods." diff --git a/pypy/objspace/std/noneobject.py b/pypy/objspace/std/noneobject.py deleted file mode 100644 index 9b5b4d1144..0000000000 --- a/pypy/objspace/std/noneobject.py +++ /dev/null @@ -1,23 +0,0 @@ -""" - None Object implementation - - ok and tested -""" - -from pypy.objspace.std.objspace import * - -class W_NoneObject(W_Object): - from pypy.objspace.std.nonetype import none_typedef as typedef -registerimplementation(W_NoneObject) - -def unwrap__None(space, w_none): - return None - -def nonzero__None(space, w_none): - return space.w_False - -def repr__None(space, w_none): - return space.wrap('None') - -register_all(vars()) - diff --git a/pypy/objspace/std/nonetype.py b/pypy/objspace/std/nonetype.py deleted file mode 100644 index 9e3ef52ae0..0000000000 --- a/pypy/objspace/std/nonetype.py +++ /dev/null @@ -1,7 +0,0 @@ -from pypy.objspace.std.stdtypedef import * - - -# ____________________________________________________________ - -none_typedef = StdTypeDef("NoneType", - ) diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py deleted file mode 100644 index f3f37a7302..0000000000 --- a/pypy/objspace/std/objectobject.py +++ /dev/null @@ -1,19 +0,0 @@ -from pypy.objspace.std.objspace import * - - -class W_ObjectObject(W_Object): - """Instances of this class are what the user can directly see with an - 'object()' call.""" - from pypy.objspace.std.objecttype import object_typedef as typedef - - -# any-to-object delegation is quite trivial, because W_ObjectObject is. 
-def delegate__ANY(space, w_obj): - return W_ObjectObject(space) -delegate__ANY.result_class = W_ObjectObject -delegate__ANY.priority = PRIORITY_PARENT_TYPE - -# ____________________________________________________________ - - -register_all(vars()) diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py deleted file mode 100644 index 59af614820..0000000000 --- a/pypy/objspace/std/objecttype.py +++ /dev/null @@ -1,55 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.objspace.descroperation import Object -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.objspace import StdObjSpace - - -def descr__repr__(space, w_obj): - w = space.wrap - classname = space.unwrap(space.getattr(space.type(w_obj), w("__name__"))) - id = space.unwrap(space.id(w_obj)) - return w("<%s object at 0x%x>" % (classname, id)) - -def descr__str__(space, w_obj): - return space.repr(w_obj) - -def descr__hash__(space, w_obj): - # XXX detect non-hashable instances (the ones overriding comparison only) - return space.id(w_obj) - -def descr__class__(space, w_obj): - return space.type(w_obj) - -def descr__new__(space, w_type, __args__): - from pypy.objspace.std.objectobject import W_ObjectObject - # don't allow arguments if the default object.__init__() is about - # to be called - w_parentinit, w_ignored = w_type.lookup_where('__init__') - if w_parentinit is space.w_object: - try: - __args__.fixedunpack(0) - except ValueError: - raise OperationError(space.w_TypeError, - space.wrap("default __new__ takes " - "no parameters")) - w_obj = space.allocate_instance(W_ObjectObject, w_type) - w_obj.__init__(space) - return w_obj - -def descr__init__(space, w_obj, __args__): - pass - -# ____________________________________________________________ - -object_typedef = StdTypeDef("object", - __getattribute__ = gateway.interp2app(Object.descr__getattribute__.im_func), - __setattr__ = 
gateway.interp2app(Object.descr__setattr__.im_func), - __delattr__ = gateway.interp2app(Object.descr__delattr__.im_func), - __str__ = gateway.interp2app(descr__str__), - __repr__ = gateway.interp2app(descr__repr__), - __hash__ = gateway.interp2app(descr__hash__), - __class__ = GetSetProperty(descr__class__), - __new__ = newmethod(descr__new__), - __init__ = gateway.interp2app(descr__init__), - ) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py deleted file mode 100644 index 2fcbdace94..0000000000 --- a/pypy/objspace/std/objspace.py +++ /dev/null @@ -1,394 +0,0 @@ -from pypy.objspace.std.register_all import register_all -from pypy.interpreter.baseobjspace import * -from pypy.interpreter.typedef import get_unique_interplevel_subclass -from pypy.interpreter.typedef import instantiate -from pypy.tool.cache import Cache -from pypy.objspace.std.multimethod import * -from pypy.objspace.descroperation import DescrOperation -from pypy.objspace.std import stdtypedef -import types - -class W_Object(W_Root, object): - "Parent base class for wrapped objects." - typedef = None - - def __init__(w_self, space): - w_self.space = space # XXX not sure this is ever used any more - - def __repr__(self): - s = '%s(%s)' % ( - self.__class__.__name__, - #', '.join(['%s=%r' % keyvalue for keyvalue in self.__dict__.items()]) - getattr(self, 'name', '') - ) - if hasattr(self, 'w__class__'): - s += ' instance of %s' % self.w__class__ - return '<%s>' % s - -# delegation priorities -PRIORITY_SAME_TYPE = 2 # converting between several impls of the same type -PRIORITY_PARENT_TYPE = 1 # converting to a base type (e.g. bool -> int) -PRIORITY_PARENT_IMPL = 0 # hard-wired in multimethod.py (W_IntObject->W_Object) -PRIORITY_CHANGE_TYPE = -1 # changing type altogether (e.g. int -> float) -PRIORITY_ANY = -999 # hard-wired in multimethod.py (... 
-> W_ANY) - -def registerimplementation(implcls): - # this function should ultimately register the implementation class somewhere - # it may be modified to take 'typedef' instead of requiring it to be - # stored in 'implcls' itself - assert issubclass(implcls, W_Object) - - -################################################################## - -class StdObjSpace(ObjSpace, DescrOperation): - """The standard object space, implementing a general-purpose object - library in Restricted Python.""" - - PACKAGE_PATH = 'objspace.std' - - def standard_types(self): - "NOT_RPYTHON: only for initializing the space." - class result: - "Import here the types you want to have appear in __builtin__." - - from pypy.objspace.std.objecttype import object_typedef - from pypy.objspace.std.booltype import bool_typedef - from pypy.objspace.std.inttype import int_typedef - from pypy.objspace.std.floattype import float_typedef - from pypy.objspace.std.tupletype import tuple_typedef - from pypy.objspace.std.listtype import list_typedef - from pypy.objspace.std.dicttype import dict_typedef - from pypy.objspace.std.basestringtype import basestring_typedef - from pypy.objspace.std.stringtype import str_typedef - from pypy.objspace.std.typetype import type_typedef - from pypy.objspace.std.slicetype import slice_typedef - from pypy.objspace.std.longtype import long_typedef - from pypy.objspace.std.unicodetype import unicode_typedef - return [value for key, value in result.__dict__.items() - if not key.startswith('_')] # don't look - - def clone_exception_hierarchy(self): - "NOT_RPYTHON: only for initializing the space." 
- from pypy.objspace.std.typeobject import W_TypeObject - from pypy.interpreter import gateway - w = self.wrap - def app___init__(self, *args): - self.args = args - w_init = w(gateway.app2interp(app___init__)) - def app___str__(self): - l = len(self.args) - if l == 0: - return '' - elif l == 1: - return str(self.args[0]) - else: - return str(self.args) - w_str = w(gateway.app2interp(app___str__)) - import exceptions - - # to create types, we should call the standard type object; - # but being able to do that depends on the existence of some - # of the exceptions... - - self.w_Exception = W_TypeObject( - self, - 'Exception', - [self.w_object], - {'__init__': w_init, - '__str__': w_str}, - ) - done = {'Exception': self.w_Exception} - - # some of the complexity of the following is due to the fact - # that we need to create the tree root first, but the only - # connections we have go in the inconvenient direction... - - for k in dir(exceptions): - if k not in done: - v = getattr(exceptions, k) - if not isinstance(v, type(Exception)): - continue - if not issubclass(v, Exception): - continue - stack = [k] - while stack: - next = stack[-1] - if next not in done: - v = getattr(exceptions, next) - b = v.__bases__[0] - if b.__name__ not in done: - stack.append(b.__name__) - continue - else: - base = done[b.__name__] - newtype = W_TypeObject( - self, - next, - [base], - {}, - ) - setattr(self, - 'w_' + next, - newtype) - done[next] = newtype - stack.pop() - else: - stack.pop() - return done - - def initialize(self): - "NOT_RPYTHON: only for initializing the space." - self._typecache = Cache() - - # The object implementations that we want to 'link' into PyPy must be - # imported here. This registers them into the multimethod tables, - # *before* the type objects are built from these multimethod tables. 
- from pypy.objspace.std import objectobject - from pypy.objspace.std import boolobject - from pypy.objspace.std import intobject - from pypy.objspace.std import floatobject - from pypy.objspace.std import tupleobject - from pypy.objspace.std import listobject - from pypy.objspace.std import dictobject - from pypy.objspace.std import stringobject - from pypy.objspace.std import typeobject - from pypy.objspace.std import sliceobject - from pypy.objspace.std import longobject - from pypy.objspace.std import noneobject - from pypy.objspace.std import iterobject - from pypy.objspace.std import unicodeobject - from pypy.objspace.std import fake - # hack to avoid imports in the time-critical functions below - global W_ObjectObject, W_BoolObject, W_IntObject, W_FloatObject - global W_TupleObject, W_ListObject, W_DictObject, W_StringObject - global W_TypeObject, W_SliceObject, W_LongObject, W_NoneObject - global W_SeqIterObject, W_UnicodeObject, fake_type - W_ObjectObject = objectobject.W_ObjectObject - W_BoolObject = boolobject.W_BoolObject - W_IntObject = intobject.W_IntObject - W_FloatObject = floatobject.W_FloatObject - W_TupleObject = tupleobject.W_TupleObject - W_ListObject = listobject.W_ListObject - W_DictObject = dictobject.W_DictObject - W_StringObject = stringobject.W_StringObject - W_TypeObject = typeobject.W_TypeObject - W_SliceObject = sliceobject.W_SliceObject - W_LongObject = longobject.W_LongObject - W_NoneObject = noneobject.W_NoneObject - W_SeqIterObject = iterobject.W_SeqIterObject - W_UnicodeObject = unicodeobject.W_UnicodeObject - fake_type = fake.fake_type - # end of hacks - # singletons - self.w_None = W_NoneObject(self) - self.w_False = W_BoolObject(self, False) - self.w_True = W_BoolObject(self, True) - from pypy.interpreter.special import NotImplemented, Ellipsis - self.w_NotImplemented = self.wrap(NotImplemented(self)) - self.w_Ellipsis = self.wrap(Ellipsis(self)) - - for_builtins = {"False": self.w_False, - "True" : self.w_True, - "None" : 
self.w_None, - "NotImplemented": self.w_NotImplemented, - "Ellipsis": self.w_Ellipsis, -# "long": self.wrap(long), # XXX temporary - } - - # types - self.types_w = {} - for typedef in self.standard_types(): - w_type = self.gettypeobject(typedef) - setattr(self, 'w_' + typedef.name, w_type) - for_builtins[typedef.name] = w_type - - # exceptions - for_builtins.update(self.clone_exception_hierarchy()) - - self.make_builtins(for_builtins) - - def gettypeobject(self, typedef): - # types_w maps each StdTypeDef instance to its - # unique-for-this-space W_TypeObject instance - return self.loadfromcache(typedef, - stdtypedef.buildtypeobject, - self._typecache) - - def wrap(self, x): - "Wraps the Python value 'x' into one of the wrapper classes." - if x is None: - return self.w_None - if isinstance(x, W_Object): - raise TypeError, "attempt to wrap already wrapped object: %s"%(x,) - if isinstance(x, OperationError): - raise TypeError, ("attempt to wrap already wrapped exception: %s"% - (x,)) - if isinstance(x, int): - if isinstance(bool, type) and isinstance(x, bool): - return self.newbool(x) - return W_IntObject(self, x) - if isinstance(x, str): - return W_StringObject(self, x) - if isinstance(x, dict): - items_w = [(self.wrap(k), self.wrap(v)) for (k, v) in x.iteritems()] - return W_DictObject(self, items_w) - if isinstance(x, float): - return W_FloatObject(self, x) - if isinstance(x, tuple): - wrappeditems = [self.wrap(item) for item in x] - return W_TupleObject(self, wrappeditems) - if isinstance(x, list): - wrappeditems = [self.wrap(item) for item in x] - return W_ListObject(self, wrappeditems) - if isinstance(x, long): - return W_LongObject(self, x) -## if isinstance(x, complex): -## # XXX is this right? 
YYY no, this is wrong right now (CT) -## c = self.getitem(self.w_builtins, self.wrap("complex")) -## return self.call_function(c, -## self.wrap(x.real), -## self.wrap(x.imag)) - if isinstance(x, BaseWrappable): - w_result = x.__spacebind__(self) - #print 'wrapping', x, '->', w_result - return w_result - # anything below this line is implicitly XXX'ed - if isinstance(x, type(Exception)) and issubclass(x, Exception): - if hasattr(self, 'w_' + x.__name__): - w_result = getattr(self, 'w_' + x.__name__) - assert isinstance(w_result, W_TypeObject) - return w_result - if isinstance(x, type): - ft = fake_type(x) - return self.gettypeobject(ft.typedef) - ft = fake_type(type(x)) - return ft(self, x) - wrap._specialize_ = "argtypes" - - def newint(self, intval): - return W_IntObject(self, intval) - - def newfloat(self, floatval): - return W_FloatObject(self, floatval) - - def newtuple(self, list_w): - assert isinstance(list_w, list) - return W_TupleObject(self, list_w) - - def newlist(self, list_w): - return W_ListObject(self, list_w) - - def newdict(self, list_pairs_w): - return W_DictObject(self, list_pairs_w) - - def newslice(self, w_start, w_end, w_step): - # w_step may be a real None - return W_SliceObject(self, w_start, w_end, w_step) - - def newstring(self, chars_w): - try: - chars = [chr(self.unwrap(w_c)) for w_c in chars_w] - except TypeError: # chr(not-an-integer) - raise OperationError(self.w_TypeError, - self.wrap("an integer is required")) - except ValueError: # chr(out-of-range) - raise OperationError(self.w_ValueError, - self.wrap("character code not in range(256)")) - return W_StringObject(self, ''.join(chars)) - - def newseqiter(self, w_obj): - return W_SeqIterObject(self, w_obj) - - def type(self, w_obj): - return w_obj.getclass(self) - - def lookup(self, w_obj, name): - w_type = w_obj.getclass(self) - return w_type.lookup(name) - - def allocate_instance(self, cls, w_subtype): - """Allocate the memory needed for an instance of an internal or - user-defined 
type, without actually __init__ializing the instance.""" - w_type = self.gettypeobject(cls.typedef) - if self.is_true(self.is_(w_type, w_subtype)): - return instantiate(cls) - else: - w_type.check_user_subclass(w_subtype) - subcls = get_unique_interplevel_subclass(cls) - instance = instantiate(subcls) - instance.user_setup(self, w_subtype) - return instance - - def unpacktuple(self, w_tuple, expected_length=None): - assert isinstance(w_tuple, W_TupleObject) - t = w_tuple.wrappeditems - if expected_length is not None and expected_length != len(t): - raise ValueError, "got a tuple of length %d instead of %d" % ( - len(t), expected_length) - return t - - - class MM: - "Container for multimethods." - #is_data_descr = MultiMethod('is_data_descr', 1, []) # returns an unwrapped bool - #getdict = MultiMethod('getdict', 1, []) # get '.__dict__' attribute - next = MultiMethod('next', 1, ['next']) # iterator interface - call = MultiMethod('call', 1, ['__call__'], varargs=True, keywords=True) - #getattribute = MultiMethod('getattr', 2, ['__getattribute__']) # XXX hack - # special visible multimethods - delegate = DelegateMultiMethod() # delegators - unwrap = MultiMethod('unwrap', 1, []) # returns an unwrapped object - issubtype = MultiMethod('issubtype', 2, []) - id = MultiMethod('id', 1, []) - init = MultiMethod('__init__', 1, varargs=True, keywords=True) - - unwrap = MM.unwrap - delegate = MM.delegate - #is_true = MM.is_true - - def is_(self, w_one, w_two): - # XXX a bit of hacking to gain more speed - if w_one is w_two: - return self.w_True - return self.w_False - - def is_true(self, w_obj): - # XXX don't look! 
- if isinstance(w_obj, W_DictObject): - return not not w_obj.used - else: - return DescrOperation.is_true(self, w_obj) - - def hash(space, w_obj): - w = space.wrap - eq = '__eq__' - ne = '__ne__' - hash_s = '__hash__' - - for w_t in space.type(w_obj).mro_w: - d = w_t.dict_w - if hash_s in d: - w_descr = d[hash_s] - return space.get_and_call_function(w_descr, w_obj) - if eq in d: - raise OperationError(space.w_TypeError, w("unhashable type")) - return space.id(w_obj) - -# add all regular multimethods to StdObjSpace -for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: - if not hasattr(StdObjSpace.MM, _name): -## if isinstance(getattr(StdObjSpace, _name, None), MultiMethod): -## mm = getattr(StdObjSpace, _name) -## else: - mm = MultiMethod(_symbol, _arity, _specialnames) - setattr(StdObjSpace.MM, _name, mm) - if not hasattr(StdObjSpace, _name): - setattr(StdObjSpace, _name, getattr(StdObjSpace.MM, _name)) - -# import the common base W_ObjectObject as well as -# default implementations of some multimethods for all objects -# that don't explicitly override them or that raise FailedToImplement -from pypy.objspace.std.register_all import register_all -import pypy.objspace.std.objectobject -import pypy.objspace.std.default diff --git a/pypy/objspace/std/register_all.py b/pypy/objspace/std/register_all.py deleted file mode 100644 index 9879375f52..0000000000 --- a/pypy/objspace/std/register_all.py +++ /dev/null @@ -1,132 +0,0 @@ - -_name_mappings = { - 'and': 'and_', - 'or': 'or_', - } - -def register_all(module_dict, alt_ns=None): - """register implementations for multimethods. - - By default a (name, object) pair of the given module dictionary - is registered on the multimethod 'name' of StdObjSpace. - If the name doesn't exist then the alternative namespace is tried - for registration. 
- """ - from pypy.objspace.std.objspace import StdObjSpace, W_ANY, W_Object - namespaces = [StdObjSpace.MM, StdObjSpace] - if alt_ns: - namespaces.insert(0, alt_ns) - - for name, obj in module_dict.items(): - if name.find('__')<1 or name.startswith('app_'): - continue - funcname, sig = name.split('__') - l=[] - for i in sig.split('_'): - if i == 'ANY': # just in case W_ANY is not in module_dict - icls = W_ANY - elif i == 'Object': # just in case W_Object is not in module_dict - icls = W_Object - else: - icls = (module_dict.get('W_%s' % i) or - module_dict.get('W_%sObject' % i)) - if icls is None: - raise ValueError, \ - "no W_%s or W_%sObject for the definition of %s" % ( - i, i, name) - l.append(icls) - - #XXX trying to be too clever at the moment for userobject.SpecialMethod - #if len(l) != obj.func_code.co_argcount-1: - # raise ValueError, \ - # "function name %s doesn't specify exactly %d arguments" % ( - # repr(name), obj.func_code.co_argcount-1) - - funcname = _name_mappings.get(funcname, funcname) - - func = hack_func_by_name(funcname, namespaces) - func.register(obj, *l) - - add_extra_comparisons() - - -def hack_func_by_name(funcname, namespaces): - for ns in namespaces: - if isinstance(ns, dict): - if funcname in ns: - return ns[funcname] - else: - if hasattr(ns, funcname): - return getattr(ns, funcname) - #import typetype - #try: - # return getattr(typetype.W_TypeType, funcname) - #except AttributeError: - # pass # catches not only the getattr() but the typetype.W_TypeType - # # in case it is not fully imported yet :-(((( - from pypy.objspace.std import objecttype - try: - return getattr(objecttype, funcname) - except AttributeError: - pass - raise NameError, ("trying hard but not finding a multimethod named %s" % - funcname) - - -def op_negated(function): - def op(space, w_1, w_2, function=function): - return space.not_(function(space, w_1, w_2)) - return op - -def op_swapped(function): - def op(space, w_1, w_2, function=function): - return 
function(space, w_2, w_1) - return op - -def op_swapped_negated(function): - def op(space, w_1, w_2, function=function): - return space.not_(function(space, w_2, w_1)) - return op - -OPERATORS = ['lt', 'le', 'eq', 'ne', 'gt', 'ge'] -OP_CORRESPONDANCES = [ - ('eq', 'ne', op_negated), - ('lt', 'gt', op_swapped), - ('le', 'ge', op_swapped), - ('lt', 'ge', op_negated), - ('le', 'gt', op_negated), - ('lt', 'le', op_swapped_negated), - ('gt', 'ge', op_swapped_negated), - ] -for op1, op2, value in OP_CORRESPONDANCES[:]: - i = OP_CORRESPONDANCES.index((op1, op2, value)) - OP_CORRESPONDANCES.insert(i+1, (op2, op1, value)) - -def add_extra_comparisons(): - """ - Add the missing comparison operators if they were not explicitly - defined: eq <-> ne and lt <-> le <-> gt <-> ge. - We try to add them in the order defined by the OP_CORRESPONDANCES - table, thus favouring swapping the arguments over negating the result. - """ - from pypy.objspace.std.objspace import StdObjSpace, W_ANY - originaltable = {} - for op in OPERATORS: - originaltable[op] = getattr(StdObjSpace.MM, op).dispatch_table.copy() - - for op1, op2, correspondance in OP_CORRESPONDANCES: - mirrorfunc = getattr(StdObjSpace.MM, op2) - for types, functions in originaltable[op1].iteritems(): - t1, t2 = types - if t1 is t2: - if types not in mirrorfunc.dispatch_table: - assert len(functions) == 1, ('Automatic' - ' registration of comparison functions' - ' only work when there is a single method for' - ' the operation.') - #print 'adding %s <<<%s>>> %s as %s(%s)' % ( - # t1, op2, t2, - # correspondance.func_name, functions[0].func_name) - mirrorfunc.register( - correspondance(functions[0]), - *types) diff --git a/pypy/objspace/std/restricted_int.py b/pypy/objspace/std/restricted_int.py deleted file mode 100644 index b0a1cfc044..0000000000 --- a/pypy/objspace/std/restricted_int.py +++ /dev/null @@ -1,274 +0,0 @@ -""" -This file defines restricted integers. 
- -Purpose: -Have an integer implementation that emulates -restricted Python for CPython. - -r_int an integer type which has overflow checking. - It doesn not automatically extend to long -r_uint an unsigned integer which has not overflow - checking. It is always positive and always - truncated to the internal machine word size. - -We try to keep the number of such internal types -to a minimum. -""" - -class r_int(int): - """ fake integer implementation in order to make sure that - primitive integer operations do overflow """ - - def __add__(self, other): - x = int(self) - y = int(other) - return r_int(x + y) - __radd__ = __add__ - - def __sub__(self, other): - x = int(self) - y = int(other) - return r_int(x - y) - - def __rsub__(self, other): - y = int(self) - x = int(other) - return r_int(x - y) - - def __mul__(self, other): - x = int(self) - if not isinstance(other, (int, long)): - return x * other - y = int(other) - return r_int(x * y) - __rmul__ = __mul__ - - def __div__(self, other): - x = int(self) - y = int(other) - return r_int(x // y) - - __floordiv__ = __div__ - - def __rdiv__(self, other): - y = int(self) - x = int(other) - return r_int(x // y) - - __rfloordiv__ = __rdiv__ - - def __mod__(self, other): - x = int(self) - y = int(other) - return r_int(x % y) - - def __rmod__(self, other): - y = int(self) - x = int(other) - return r_int(x % y) - - def __divmod__(self, other): - x = int(self) - y = int(other) - res = divmod(x, y) - return (r_int(res[0]), r_int(res[1])) - - def __lshift__(self, n): - # ensure long shift, so we don't depend on - # shift truncation (2.3) vs. 
long(2.4) - x = long(self) - y = int(n) - return r_int(x << y) - - def __rlshift__(self, n): - y = long(self) - x = int(n) - return r_int(x << y) - - def __rshift__(self, n): - x = int(self) - y = int(n) - return r_int(x >> y) - - def __rrshift__(self, n): - y = int(self) - x = int(n) - return r_int(x >> y) - - def __or__(self, other): - x = int(self) - y = int(other) - return r_int(x | y) - __ror__ = __or__ - - def __and__(self, other): - x = int(self) - y = int(other) - return r_int(x & y) - __rand__ = __and__ - - def __xor__(self, other): - x = int(self) - y = int(other) - return r_int(x ^ y) - __rxor__ = __xor__ - - def __neg__(self): - x = int(self) - return r_int(-x) - - def __pos__(self): - return r_int(self) - - def __invert__(self): - x = int(self) - return r_int(~x) - - def __pow__(self, other, m=None): - x = int(self) - y = int(other) - res = pow(x, y, m) - return r_int(res) - - def __rpow__(self, other, m=None): - y = int(self) - x = int(other) - res = pow(x, y, m) - return r_int(res) - -# set up of machine internals -_bits = 0 -_itest = 1 -_Ltest = 1L -while _itest == _Ltest and type(_itest) is int: - _itest *= 2 - _Ltest *= 2 - _bits += 1 - -LONG_BIT = _bits+1 -LONG_MASK = _Ltest*2-1 - -del _bits, _itest, _Ltest - -class r_uint(long): - """ fake unsigned integer implementation """ - - _mask = LONG_MASK - - def __new__(klass, val): - return long.__new__(klass, val & klass._mask) - - def __add__(self, other): - x = long(self) - y = long(other) - return r_uint(x + y) - __radd__ = __add__ - - def __sub__(self, other): - x = long(self) - y = long(other) - return r_uint(x - y) - - def __rsub__(self, other): - y = long(self) - x = long(other) - return r_uint(x - y) - - def __mul__(self, other): - x = long(self) - if not isinstance(other, (int, long)): - return x * other - y = long(other) - return r_uint(x * y) - __rmul__ = __mul__ - - def __div__(self, other): - x = long(self) - y = long(other) - return r_uint(x // y) - - __floordiv__ = __div__ - - def 
__rdiv__(self, other): - y = long(self) - x = long(other) - return r_uint(x // y) - - __rfloordiv__ = __rdiv__ - - def __mod__(self, other): - x = long(self) - y = long(other) - return r_uint(x % y) - - def __rmod__(self, other): - y = long(self) - x = long(other) - return r_uint(x % y) - - def __divmod__(self, other): - x = long(self) - y = long(other) - res = divmod(x, y) - return (r_uint(res[0]), r_uint(res[1])) - - def __lshift__(self, n): - x = long(self) - y = long(n) - return r_uint(x << y) - - def __rlshift__(self, n): - y = long(self) - x = long(n) - return r_uint(x << y) - - def __rshift__(self, n): - x = long(self) - y = long(n) - return r_uint(x >> y) - - def __rrshift__(self, n): - y = long(self) - x = long(n) - return r_uint(x >> y) - - def __or__(self, other): - x = long(self) - y = long(other) - return r_uint(x | y) - __ror__ = __or__ - - def __and__(self, other): - x = long(self) - y = long(other) - return r_uint(x & y) - __rand__ = __and__ - - def __xor__(self, other): - x = long(self) - y = long(other) - return r_uint(x ^ y) - __rxor__ = __xor__ - - def __neg__(self): - x = long(self) - return r_uint(-x) - - def __pos__(self): - return r_uint(self) - - def __invert__(self): - x = long(self) - return r_uint(~x) - - def __pow__(self, other, m=None): - x = long(self) - y = long(other) - res = pow(x, y, m) - return r_uint(res) - - def __rpow__(self, other, m=None): - y = long(self) - x = long(other) - res = pow(x, y, m) - return r_uint(res) diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py deleted file mode 100644 index 1340bb55cf..0000000000 --- a/pypy/objspace/std/sliceobject.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Reviewed 03-06-21 - -slice object construction tested, OK -indices method tested, OK -""" - -from pypy.objspace.std.objspace import * - - -class W_SliceObject(W_Object): - from pypy.objspace.std.slicetype import slice_typedef as typedef - - def __init__(w_self, space, w_start, w_stop, w_step): - 
W_Object.__init__(w_self, space) - w_self.w_start = w_start - w_self.w_stop = w_stop - w_self.w_step = w_step - -registerimplementation(W_SliceObject) - -register_all(vars()) diff --git a/pypy/objspace/std/slicetype.py b/pypy/objspace/std/slicetype.py deleted file mode 100644 index 8c6fa209a5..0000000000 --- a/pypy/objspace/std/slicetype.py +++ /dev/null @@ -1,112 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.register_all import register_all -from pypy.interpreter.error import OperationError - -slice_indices = MultiMethod('indices', 2) - -# default application-level implementations for some operations - -def app_slice_indices3(slice, length): - # this is used internally, analogous to CPython's PySlice_GetIndicesEx - step = slice.step - if step is None: - step = 1 - elif step == 0: - raise ValueError, "slice step cannot be zero" - if step < 0: - defstart = length - 1 - defstop = -1 - else: - defstart = 0 - defstop = length - - start = slice.start - if start is None: - start = defstart - else: - if start < 0: - start += length - if start < 0: - if step < 0: - start = -1 - else: - start = 0 - elif start >= length: - if step < 0: - start = length - 1 - else: - start = length - - stop = slice.stop - if stop is None: - stop = defstop - else: - if stop < 0: - stop += length - if stop < 0: - stop = -1 - elif stop > length: - stop = length - - return start, stop, step -slice_indices__ANY_ANY = slice_indices3 = gateway.app2interp(app_slice_indices3) - -def app_slice_indices4(slice, sequencelength): - start, stop, step = slice_indices3(slice, sequencelength) - slicelength = stop - start - lengthsign = cmp(slicelength, 0) - stepsign = cmp(step, 0) - if stepsign == lengthsign: - slicelength = (slicelength - lengthsign) // step + 1 - else: - slicelength = 0 - - return start, stop, step, slicelength -slice_indices4 = gateway.app2interp(app_slice_indices4) - -# utility functions -def indices3(space, w_slice, length): - w_result = slice_indices3(space, 
w_slice, space.wrap(length)) - w_1, w_2, w_3 = space.unpacktuple(w_result, 3) - return space.unwrap(w_1), space.unwrap(w_2), space.unwrap(w_3) - -def indices4(space, w_slice, length): - w_result = slice_indices4(space, w_slice, space.wrap(length)) - w_1, w_2, w_3, w_4 = space.unpacktuple(w_result, 4) - return (space.unwrap(w_1), space.unwrap(w_2), - space.unwrap(w_3), space.unwrap(w_4)) - -register_all(vars(), globals()) - -# ____________________________________________________________ - -def descr__new__(space, w_slicetype, *args_w): - from pypy.objspace.std.sliceobject import W_SliceObject - w_start = space.w_None - w_stop = space.w_None - w_step = space.w_None - if len(args_w) == 1: - w_stop, = args_w - elif len(args_w) == 2: - w_start, w_stop = args_w - elif len(args_w) == 3: - w_start, w_stop, w_step = args_w - elif len(args_w) > 3: - raise OperationError(space.w_TypeError, - space.wrap("slice() takes at most 3 arguments")) - else: - raise OperationError(space.w_TypeError, - space.wrap("slice() takes at least 1 argument")) - w_obj = space.allocate_instance(W_SliceObject, w_slicetype) - w_obj.__init__(space, w_start, w_stop, w_step) - return w_obj - -# ____________________________________________________________ - -slice_typedef = StdTypeDef("slice", - __new__ = newmethod(descr__new__), - start = attrproperty_w('w_start'), - stop = attrproperty_w('w_stop'), - step = attrproperty_w('w_step'), - ) -slice_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py deleted file mode 100644 index 5f84f81b5e..0000000000 --- a/pypy/objspace/std/stdtypedef.py +++ /dev/null @@ -1,191 +0,0 @@ -from pypy.interpreter import eval, function, gateway -from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.typedef import attrproperty, attrproperty_w -from pypy.objspace.std.multimethod import MultiMethod, FailedToImplement - -__all__ = 
['StdTypeDef', 'newmethod', 'gateway', - 'GetSetProperty', 'attrproperty', 'attrproperty_w', - 'MultiMethod'] - - -class StdTypeDef(TypeDef): - - def __init__(self, __name, __base=None, **rawdict): - "NOT_RPYTHON: initialization-time only." - TypeDef.__init__(self, __name, __base, **rawdict) - self.local_multimethods = [] - - def registermethods(self, namespace): - "NOT_RPYTHON: initialization-time only." - self.local_multimethods += hack_out_multimethods(namespace) - -def issubtypedef(a, b): - from pypy.objspace.std.objecttype import object_typedef - if b is object_typedef: - return True - while a is not b: - a = a.base - if a is None: - return False - return True - - -def newmethod(descr_new): - "NOT_RPYTHON: initialization-time only." - # this is turned into a static method by the constructor of W_TypeObject. - return gateway.interp2app(descr_new) - -# ____________________________________________________________ -# -# All the code below fishes from the multimethod registration tables -# the descriptors to put into the W_TypeObjects. -# - -def buildtypeobject(typedef, space): - "NOT_RPYTHON: initialization-time only." 
- # build a W_TypeObject from this StdTypeDef - from pypy.objspace.std.typeobject import W_TypeObject - from pypy.objspace.std.objecttype import object_typedef - - w = space.wrap - rawdict = typedef.rawdict.copy() - - if isinstance(typedef, StdTypeDef): - # get all the sliced multimethods - multimethods = slicemultimethods(space.__class__, typedef) - for name, code in multimethods.items(): - # compute the slice and ignore the multimethod if empty - if not code.computeslice(space): - continue - # create a Function around the sliced multimethod code - fn = function.Function(space, code, defs_w=code.getdefaults(space)) - assert name not in rawdict, 'name clash: %s in %s_typedef' % ( - name, typedef.name) - rawdict[name] = fn - - # compute the bases - if typedef is object_typedef: - bases_w = [] - else: - base = typedef.base or object_typedef - bases_w = [space.gettypeobject(base)] - - # wrap everything - dict_w = {} - for descrname, descrvalue in rawdict.items(): - dict_w[descrname] = w(descrvalue) - - return W_TypeObject(space, typedef.name, bases_w, dict_w, - overridetypedef=typedef, forcedict=False) - -def hack_out_multimethods(ns): - "NOT_RPYTHON: initialization-time only." - result = [] - for value in ns.itervalues(): - if isinstance(value, MultiMethod): - result.append(value) - return result - -def slicemultimethod(multimethod, typeclass, result): - for i in range(len(multimethod.specialnames)): - # each MultimethodCode embeds a multimethod - name = multimethod.specialnames[i] - if name in result: - # conflict between e.g. 
__lt__ and - # __lt__-as-reversed-version-of-__gt__ - code = result[name] - if code.bound_position < i: - continue - mmframeclass = multimethod.extras.get('mmframeclass') - if mmframeclass is None: - if len(multimethod.specialnames) > 1: - mmframeclass = SpecialMmFrame - else: - mmframeclass = MmFrame - code = MultimethodCode(multimethod, mmframeclass, typeclass, i) - result[name] = code - -def slicemultimethods(spaceclass, typeclass): - result = {} - # import and slice all multimethods of the space.MM container - for multimethod in hack_out_multimethods(spaceclass.MM.__dict__): - slicemultimethod(multimethod, typeclass, result) - # import all multimethods defined directly on the type without slicing - for multimethod in typeclass.local_multimethods: - slicemultimethod(multimethod, None, result) - return result - -class MultimethodCode(eval.Code): - """A code object that invokes a multimethod.""" - - def __init__(self, multimethod, framecls, typeclass, bound_position=0): - "NOT_RPYTHON: initialization-time only." - eval.Code.__init__(self, multimethod.operatorsymbol) - self.basemultimethod = multimethod - self.typeclass = typeclass - self.bound_position = bound_position - self.framecls = framecls - argnames = ['x%d'%(i+1) for i in range(multimethod.arity)] - varargname = kwargname = None - # XXX do something about __call__ and __init__ which still use - # XXX packed arguments: w_args, w_kwds instead of *args_w, **kwds_w - if multimethod.extras.get('varargs', False): - varargname = 'args' - if multimethod.extras.get('keywords', False): - kwargname = 'keywords' - self.sig = argnames, varargname, kwargname - - def computeslice(self, space): - "NOT_RPYTHON: initialization-time only." 
- if self.typeclass is None: - slice = self.basemultimethod - else: - slice = self.basemultimethod.slice(self.typeclass, - self.bound_position) - if slice.is_empty(): - return False - else: - self.mm = slice.get(space) - return True - - def signature(self): - return self.sig - - def getdefaults(self, space): - return [space.wrap(x) - for x in self.basemultimethod.extras.get('defaults', ())] - - def create_frame(self, space, w_globals, closure=None): - return self.framecls(space, self) - -class MmFrame(eval.Frame): - - def setfastscope(self, scope_w): - args = list(scope_w) - args.insert(0, args.pop(self.code.bound_position)) - self.args = args - - def getfastscope(self): - raise OperationError(self.space.w_TypeError, - self.space.wrap("cannot get fastscope of a MmFrame")) - - def run(self): - "Call the multimethod, raising a TypeError if not implemented." - w_result = self.code.mm(*self.args) - # we accept a real None from operations with no return value - if w_result is None: - w_result = self.space.w_None - return w_result - -class SpecialMmFrame(MmFrame): - def run(self): - "Call the multimethods, possibly returning a NotImplemented." 
- try: - return self.code.mm.perform_call(*self.args) - except FailedToImplement, e: - if e.args: - raise OperationError(e.args[0], e.args[1]) - else: - return self.space.w_NotImplemented - diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py deleted file mode 100644 index 5138840f32..0000000000 --- a/pypy/objspace/std/stringobject.py +++ /dev/null @@ -1,1006 +0,0 @@ -# -*- Coding: Latin-1 -*- -""" -stringobject.py - -this is here: - to not confuse python-mode - -Synopsis of implemented methods (* marks work in progress) - -Py PyPy - - def _is_generic(w_self, fun): - def mod__String_ANY(space, w_str, w_item):def mod__String_Tuple(space, w_str, w_tuple):def mod_str_tuple(space, w_format, w_args): - def ord__String(space, w_str): - def string_richcompare(space, w_str1, w_str2, op): - def unwrap__String(space, w_str): -__add__ def add__String_String(space, w_left, w_right): -__class__ -__contains__ -__delattr__ -__doc__ -__eq__ def eq__String_String(space, w_str1, w_str2): -__ge__ def ge__String_String(space, w_str1, w_str2): -__getattribute__ -__getitem__ def getitem__String_Int(space, w_str, w_int): def getitem__String_Slice(space, w_str, w_slice): -__getslice__ -__gt__ def gt__String_String(space, w_str1, w_str2): -__hash__ def hash__String(space, w_str): -__init__ -__le__ def le__String_String(space, w_str1, w_str2): -__len__ def len__String(space, w_str): -__lt__ def lt__String_String(space, w_str1, w_str2): -__mul__ -__ne__ def ne__String_String(space, w_str1, w_str2): -__new__ -__reduce__ -__repr__ def repr__String(space, w_str): -__rmul__ -__setattr__ -__str__ def str__String(space, w_str): -capitalize def str_capitalize__String(space, w_self): -center def str_center__String_Int(space, w_self): -count def str_count__String_String_Int_Int(space, w_self): [optional arguments not supported now] -decode !Unicode not supported now -encode !Unicode not supported now -endswith str_endswith__String_String [optional arguments not supported 
now] -expandtabs str_expandtabs__String_Int -find OK -index OK -isalnum def str_isalnum__String(space, w_self): def _isalnum(ch): -isalpha def str_isalpha__String(space, w_self): def _isalpha(ch): -isdigit def str_isdigit__String(space, w_self): def _isdigit(ch): -islower def str_islower__String(space, w_self): def _islower(ch): -isspace def str_isspace__String(space, w_self): def _isspace(ch): -istitle def str_istitle(space, w_self): -isupper def str_isupper__String(space, w_self): def _isupper(ch): -join def str_join__String_ANY(space, w_self, w_list): -ljust def str_ljust__String_ANY(space, w_self, w_arg): -lower OK -lstrip def str_lstrip__String_String(space, w_self, w_chars): -replace OK -rfind OK -rindex OK -rjust def str_rjust__String_ANY(space, w_self, w_arg): -rstrip def str_rstrip__String_String(space, w_self, w_chars): -split def str_split__String_None_Int(space, w_self, w_none, w_maxsplit=-1):def str_split__String_String_Int(space, w_self, w_by, w_maxsplit=-1): -splitlines def str_splitlines__String_String(space, w_self, w_keepends): -startswith str_startswith__String_String [optional arguments not supported now] -strip def str_strip__String_String(space, w_self, w_chars): -swapcase OK -title def str_title__String(space, w_self): -translate OK -upper def str_upper__String(space, w_self): -zfill OK -""" - -from pypy.objspace.std.objspace import * -from pypy.interpreter import gateway -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std import slicetype -from pypy.objspace.std.listobject import W_ListObject -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.tupleobject import W_TupleObject - -# XXX consider reimplementing _value to be a list of characters -# instead of a plain string - - -class W_StringObject(W_Object): - from pypy.objspace.std.stringtype import str_typedef as typedef - - def __init__(w_self, space, str): - 
registerimplementation(W_StringObject)


# ---------------------------------------------------------------------------
# Character classification helpers: single 8-bit characters, tested with
# ord()-based ASCII ranges rather than the host str methods.
# ---------------------------------------------------------------------------

def _isspace(ch):
    # TAB, LF, VT, FF, CR, SPACE
    return ord(ch) in (9, 10, 11, 12, 13, 32)

def _isdigit(ch):
    return 48 <= ord(ch) <= 57

def _isalpha(ch):
    code = ord(ch)
    return 97 <= code <= 122 or 65 <= code <= 90

def _isalnum(ch):
    code = ord(ch)
    return (97 <= code <= 122 or
            65 <= code <= 90 or
            48 <= code <= 57)

def _isupper(ch):
    return 65 <= ord(ch) <= 90

def _islower(ch):
    return 97 <= ord(ch) <= 122

def _is_generic(w_self, predicate):
    """Common driver for the str.is*() multimethods.

    An empty string answers False; otherwise the answer is True iff
    *predicate* holds for every character."""
    space = w_self.space
    value = space.unwrap(w_self)
    if not value:
        return space.w_False
    if len(value) == 1:
        return space.newbool(predicate(value[0]))
    for ch in value:
        if not predicate(ch):
            return space.w_False
    return space.w_True

def str_isspace__String(space, w_self):
    return _is_generic(w_self, _isspace)

def str_isdigit__String(space, w_self):
    return _is_generic(w_self, _isdigit)

def str_isalpha__String(space, w_self):
    return _is_generic(w_self, _isalpha)

def str_isalnum__String(space, w_self):
    return _is_generic(w_self, _isalnum)

def str_isupper__String(space, w_self):
    return _is_generic(w_self, _isupper)

def str_islower__String(space, w_self):
    return _is_generic(w_self, _islower)

def str_istitle__String(space, w_self):
    """False as soon as a letter breaks title-casing: an uppercase letter
    inside a word, or a lowercase letter starting one."""
    value = space.unwrap(w_self)
    prev = '!'          # sentinel: not alphabetic
    for ch in value:
        if ch.isalpha():
            if (prev.isalpha() and ch.isupper()) or \
               (not prev.isalpha() and ch.islower()):
                return space.w_False
            prev = ch
    return space.w_True

def str_upper__String(space, w_self):
    value = space.unwrap(w_self)
    out = []
    for ch in value:
        if _islower(ch):
            ch = chr(ord(ch) - 32)     # ASCII a-z -> A-Z
        out.append(ch)
    return space.wrap("".join(out))

def str_lower__String(space, w_self):
    value = space.unwrap(w_self)
    out = []
    for ch in value:
        if _isupper(ch):
            ch = chr(ord(ch) + 32)     # ASCII A-Z -> a-z
        out.append(ch)
    return space.wrap("".join(out))

def str_swapcase__String(space, w_self):
    value = space.unwrap(w_self)
    out = []
    for ch in value:
        if _isupper(ch):
            ch = chr(ord(ch) + 32)
        elif _islower(ch):
            ch = chr(ord(ch) - 32)
        out.append(ch)
    return space.wrap("".join(out))

def str_capitalize__String(space, w_self):
    """Uppercase the first character, lowercase the rest (ASCII only)."""
    value = space.unwrap(w_self)
    if not value:
        return space.wrap("")
    first = value[0]
    if _islower(first):
        first = chr(ord(first) - 32)
    out = [first]
    for ch in value[1:]:
        if _isupper(ch):
            ch = chr(ord(ch) + 32)
        out.append(ch)
    return space.wrap("".join(out))

def str_title__String(space, w_self):
    """Uppercase a letter following a non-letter, lowercase the others."""
    value = space.unwrap(w_self)
    out = []
    prev = ' '
    for ch in value:
        if not prev.isalpha():
            ch = ch.upper()
        else:
            ch = ch.lower()
        out.append(ch)
        prev = ch               # track the *transformed* character
    return space.wrap("".join(out))

def str_split__String_None_Int(space, w_self, w_none, w_maxsplit=-1):
    """split() with no separator: split on runs of whitespace, performing
    at most *maxsplit* splits when it is >= 0."""
    value = space.unwrap(w_self)
    maxsplit = space.unwrap(w_maxsplit)
    parts = []
    inword = 0
    pos = 0                     # index of the character being examined
    for ch in value:
        if ch.isspace():
            if inword:
                inword = 0
        else:
            if inword:
                parts[-1] += ch
            else:
                if maxsplit > -1:
                    if maxsplit == 0:
                        # limit reached: the rest is one final chunk
                        parts.append(value[pos:])
                        break
                    maxsplit = maxsplit - 1
                parts.append(ch)
                inword = 1
        pos = pos + 1
    wrapped = [W_StringObject(space, part) for part in parts]
    return W_ListObject(space, wrapped)
def str_split__String_String_Int(space, w_self, w_by, w_maxsplit=-1):
    """split() with an explicit separator string, at most *maxsplit*
    splits when it is >= 0."""
    unwrap = space.unwrap
    value = unwrap(w_self)
    by = unwrap(w_by)
    bylen = len(by)
    maxsplit = unwrap(w_maxsplit)

    parts = []
    start = 0
    if maxsplit == -1:
        countdown = 1           # never decremented: split without limit
    else:
        countdown = maxsplit

    while countdown:
        # NB: cannot use the host str.find -- this code *implements* str
        next = _find(value, by, start, len(value), 1)
        if next < 0:
            parts.append(value[start:])
            start = len(value) + 1
            break
        parts.append(value[start:next])
        start = next + bylen
        if maxsplit > -1:
            countdown = countdown - 1

    if start < len(value):
        parts.append(value[start:])

    wrapped = [W_StringObject(w_self.space, part) for part in parts]
    return W_ListObject(w_self.space, wrapped)


def str_join__String_ANY(space, w_self, w_list):
    """S.join(sequence); delegates to unicode.join when an item is unicode,
    raises TypeError for any other non-string item."""
    unwrap = space.unwrap
    items = space.unpackiterable(w_list)
    if not items:
        return space.wrap("")
    sep = unwrap(w_self)
    # Validate the item types first, in sequence order.
    for i in range(len(items)):
        if not space.is_true(space.isinstance(items[i], space.w_str)):
            if space.is_true(space.isinstance(items[i], space.w_unicode)):
                w_u = space.call_function(space.w_unicode, w_self)
                return space.call_method(w_u, "join", w_list)
            raise OperationError(
                space.w_TypeError,
                space.wrap("sequence item %d: expected string, %s "
                           "found" % (i, space.type(items[i]).name)))
    return space.wrap(sep.join([unwrap(w_item) for w_item in items]))


def str_rjust__String_ANY(space, w_self, w_arg):
    """Pad with spaces on the left up to the requested width."""
    unwrap = space.unwrap
    width = unwrap(w_arg)
    value = unwrap(w_self)
    pad = width - len(value)
    if pad > 0:
        value = ' ' * pad + value
    return space.wrap(value)


def str_ljust__String_ANY(space, w_self, w_arg):
    """Pad with spaces on the right up to the requested width."""
    unwrap = space.unwrap
    value = unwrap(w_self)
    width = unwrap(w_arg)
    pad = width - len(value)
    if pad > 0:
        value = value + ' ' * pad
    return space.wrap(value)


def _convert_idx_params(space, w_self, w_sub, w_start, w_end):
    """Unwrap (self, sub, start, end); None bounds default to the whole
    string."""
    unwrap = space.unwrap
    start = unwrap(w_start)
    end = unwrap(w_end)
    self = unwrap(w_self)
    sub = unwrap(w_sub)
    if start is None:
        start = 0
    if end is None:
        end = len(self)
    return (self, sub, start, end)


def str_find__String_String_ANY_ANY(space, w_self, w_sub,
                                    w_start=None, w_end=None):
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub,
                                                  w_start, w_end)
    return space.wrap(_find(self, sub, start, end, 1))


def str_rfind__String_String_ANY_ANY(space, w_self, w_sub,
                                     w_start=None, w_end=None):
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub,
                                                  w_start, w_end)
    return space.wrap(_find(self, sub, start, end, -1))


def str_index__String_String_ANY_ANY(space, w_self, w_sub,
                                     w_start=None, w_end=None):
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub,
                                                  w_start, w_end)
    res = _find(self, sub, start, end, 1)
    if res == -1:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.index"))
    return space.wrap(res)
def str_rindex__String_String_ANY_ANY(space, w_self, w_sub,
                                      w_start=None, w_end=None):
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub,
                                                  w_start, w_end)
    res = _find(self, sub, start, end, -1)
    if res == -1:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.rindex"))
    return space.wrap(res)


def str_replace__String_String_String_Int(space, w_self, w_sub, w_by,
                                          w_maxsplit=-1):
    """Replace occurrences of sub with by, at most maxsplit times
    (-1 means: all of them)."""
    unwrap = space.unwrap
    input = unwrap(w_self)
    sub = unwrap(w_sub)
    by = unwrap(w_by)
    maxsplit = unwrap(w_maxsplit)

    # First pass: collect the start index of every occurrence to replace.
    occurrences = []
    startidx = 0
    endidx = len(input)
    foundidx = _find(input, sub, startidx, endidx, 1)
    while foundidx > -1 and (maxsplit == -1 or maxsplit > 0):
        occurrences.append(foundidx)
        if len(sub) == 0:
            startidx = foundidx + 1     # empty pattern: still step forward
        else:
            startidx = foundidx + len(sub)
        foundidx = _find(input, sub, startidx, endidx, 1)
        if maxsplit != -1:
            maxsplit = maxsplit - 1

    # Second pass: stitch the result together.
    pieces = []
    startidx = 0
    for idx in occurrences:
        pieces.append(input[startidx:idx])
        pieces.append(by)
        startidx = idx + len(sub)
    pieces.append(input[startidx:])
    return space.wrap("".join(pieces))


def _find(self, sub, start, end, dir):
    """Scan self[start:end] for sub: forward if dir > 0, backward
    otherwise.  Returns the match index, or -1.  Negative/oversized
    bounds are clamped like slice indices."""
    length = len(self)

    # clamp the search bounds
    if end > length:
        end = length
    elif end < 0:
        end += length
        if end < 0:
            end = 0
    if start < 0:
        start += length
        if start < 0:
            start = 0

    sublen = len(sub)
    if dir > 0:
        if sublen == 0 and start < end:
            return start
        last = end - sublen + 1
        for i in range(start, last):
            if self[i:i + sublen] == sub:
                return i
        return -1
    else:
        if sublen == 0 and start < end:
            return end
        for j in range(end - sublen, start - 1, -1):
            if self[j:j + sublen] == sub:
                return j
        return -1


def _strip(space, w_self, w_chars, left, right):
    "internal function called by str_xstrip methods"
    u_self = space.unwrap(w_self)
    u_chars = space.unwrap(w_chars)
    if u_self is None or u_chars is None:
        return w_self

    lpos = 0
    rpos = len(u_self)
    if left:
        while lpos < rpos and u_self[lpos] in u_chars:
            lpos += 1
    if right:
        while rpos > 0 and u_self[rpos - 1] in u_chars:
            rpos -= 1
    return space.wrap(u_self[lpos:rpos])


def str_strip__String_String(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=1, right=1)


def str_rstrip__String_String(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=0, right=1)


def str_lstrip__String_String(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=1, right=0)


def str_center__String_Int(space, w_self, w_arg):
    """S.center(width) -- when the padding is odd, the extra space goes
    to the right."""
    value = space.unwrap(w_self)
    width = space.unwrap(w_arg)
    margin = width - len(value)
    if margin > 0:
        left = margin // 2
        value = left * ' ' + value + (margin - left) * ' '
    return W_StringObject(space, value)


def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end):
    """Count occurrences of w_arg in w_self[start:end].
    NOTE(review): the scan advances one character at a time (overlapping
    matches are counted) and 'end' is not rebased onto the sliced 'area'
    -- historic behavior, preserved verbatim."""
    value = space.unwrap(w_self)
    sub = space.unwrap(w_arg)
    start = space.unwrap(w_start)
    end = space.unwrap(w_end)

    if end is None:
        end = len(value)
    elif end < 0:
        end += len(value)
    if start is None:
        start = 0

    area = value[start:end]
    count = 0
    pos = -1
    while 1:
        pos = _find(area, sub, pos + 1, end, 1)
        if pos == -1:
            break
        count += 1
    return W_IntObject(space, count)
#[optional arguments not supported now]
def str_endswith__String_String(space, w_self, w_end):
    """S.endswith(suffix) -- the empty suffix always matches."""
    value = space.unwrap(w_self)
    suffix = space.unwrap(w_end)
    found = 0
    if suffix:
        n = len(suffix)
        if n <= len(value):
            found = (suffix == value[-n:])
    else:
        found = 1
    return W_IntObject(space, found)


#[optional arguments not supported now]
def str_startswith__String_String(space, w_self, w_start):
    """S.startswith(prefix) -- the empty prefix always matches."""
    value = space.unwrap(w_self)
    prefix = space.unwrap(w_start)
    found = 0
    if prefix:
        n = len(prefix)
        if n <= len(value):
            found = (prefix == value[:n])
    else:
        found = 1
    return W_IntObject(space, found)


def _tabindent(token, tabsize):
    "calculates distance behind the token to the next tabstop"
    if not token:
        return tabsize
    # Measure the length of token's last line; '\n' or '\r' ends it
    # ('\r' just for passing the adapted CPython test).
    distance = 0
    offset = len(token)
    while 1:
        if token[offset - 1] == "\n" or token[offset - 1] == "\r":
            break
        distance += 1
        offset -= 1
        if offset == 0:
            break
    distance = (tabsize - distance) % tabsize
    if distance == 0:
        distance = tabsize
    return distance


def str_expandtabs__String_Int(space, w_self, w_tabsize):
    """Replace each tab by the spaces needed to reach the next tab stop."""
    value = space.unwrap(w_self)
    tabsize = space.unwrap(w_tabsize)
    expanded = ""
    if value:
        chunks = value.split("\t")          # XXX use pypy split
        expanded = prev = chunks.pop(0)
        for chunk in chunks:
            expanded += " " * _tabindent(prev, tabsize) + chunk
            prev = chunk
    return W_StringObject(space, expanded)


def str_splitlines__String_Int(space, w_self, w_keepends):
    """Split at '\\n' characters only.
    NOTE(review): a final segment without a trailing '\\n' is dropped by
    this historic implementation -- preserved verbatim."""
    value = space.unwrap(w_self)
    keepends = space.unwrap(w_keepends)
    total = len(value)
    lines = []
    pos = 0
    while 1:
        oldpos = pos
        pos = _find(value, '\n', pos, total, 1) + 1
        if pos <= oldpos:
            break
        w_item = space.wrap(value[oldpos:pos])
        if not keepends:
            w_item = _strip(space, w_item, W_StringObject(space, '\n'),
                            left=0, right=1)
        lines.append(w_item)
    return W_ListObject(space, lines)


def str_zfill__String_Int(space, w_self, w_width):
    """Pad a numeric string with zeros on the left, keeping a leading
    sign character in front of the padding."""
    value = space.unwrap(w_self)
    width = space.unwrap(w_width)
    if len(value) >= width:
        return w_self

    buf = [' '] * width
    if value and (value[0] == '+' or value[0] == '-'):
        buf[0] = value[0]
        start = 1
        middle = width - len(value) + 1
    else:
        start = 0
        middle = width - len(value)

    for i in range(start, middle):
        buf[i] = '0'
    for i in range(middle, width):
        buf[i] = value[start]
        start = start + 1
    return space.wrap("".join(buf))


def app_str_translate__String_String_String(s, table, deletechars=''):
    """charfilter - unicode handling is not implemented

    Return a copy of the string where all characters occurring
    in the optional argument deletechars are removed, and the
    remaining characters have been mapped through the given translation table,
    which must be a string of length 256"""

    if len(table) < 256:
        raise ValueError("translation table must be 256 characters long")

    return ''.join([table[ord(s[i])] for i in range(len(s))
                    if s[i] not in deletechars])

str_translate__String_String_String = gateway.app2interp(
    app_str_translate__String_String_String)


def unwrap__String(space, w_str):
    return w_str._value


def hash__String(space, w_str):
    # reuse the host string hash
    return W_IntObject(space, hash(space.unwrap(w_str)))
# Rich comparisons delegate to the host string comparison and return the
# space's boolean singletons.

def lt__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value < w_str2._value)

def le__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value <= w_str2._value)

def eq__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value == w_str2._value)

def ne__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value != w_str2._value)

def gt__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value > w_str2._value)

def ge__String_String(space, w_str1, w_str2):
    return space.newbool(w_str1._value >= w_str2._value)


def getitem__String_Int(space, w_str, w_int):
    """S[i] with negative-index wrapping; raises IndexError when out of
    range."""
    value = space.unwrap(w_str)
    index = w_int.intval
    if index < 0:
        index += len(value)
    if index < 0 or index >= len(value):
        exc = space.call_function(space.w_IndexError,
                                  space.wrap("string index out of range"))
        raise OperationError(space.w_IndexError, exc)
    return W_StringObject(space, value[index])


def getitem__String_Slice(space, w_str, w_slice):
    # XXX this is really too slow for slices with no step argument
    wrap = space.wrap
    length = len(w_str._value)
    start, stop, step, sl = slicetype.indices4(space, w_slice, length)
    chars = [space.getitem(w_str, wrap(start + i * step)) for i in range(sl)]
    return str_join__String_ANY(space, space.newstring([]),
                                space.newlist(chars))


def mul__String_Int(space, w_str, w_mul):
    """S * n (n <= 0 yields the empty string)."""
    return space.wrap(space.unwrap(w_str) * space.unwrap(w_mul))


def mul__Int_String(space, w_mul, w_str):
    return mul__String_Int(space, w_str, w_mul)


def add__String_String(space, w_left, w_right):
    unwrap = space.unwrap
    return space.wrap(unwrap(w_left) + unwrap(w_right))


def len__String(space, w_str):
    return space.wrap(len(space.unwrap(w_str)))


def str__String(space, w_str):
    return w_str


def iter__String(space, w_list):
    from pypy.objspace.std import iterobject
    return iterobject.W_SeqIterObject(space, w_list)


def app_contains__String_String(self, sub):
    return self.find(sub) >= 0

contains__String_String = gateway.app2interp(app_contains__String_String)
repr = quote - - for c in s: - if c == '\\' or c == quote: repr += '\\'+c - elif c == '\t': repr += '\\t' - elif c == '\r': repr += '\\r' - elif c == '\n': repr += '\\n' - elif not '\x20' <= c < '\x7f': - n = ord(c) - repr += '\\x'+"0123456789abcdef"[n>>4]+"0123456789abcdef"[n&0xF] - else: - repr += c - - repr += quote - - return repr - -repr__String = gateway.app2interp(app_repr__String) - - -def ord__String(space, w_str): - if len(w_str._value) != 1: - raise OperationError( - space.w_TypeError, - space.wrap("ord() expected a character, but string " - "of length %d found"%(len(w_str._value),))) - return space.wrap(ord(space.unwrap(w_str))) - -def app_mod__String_ANY(format, values): - import _formatting - if isinstance(values, tuple): - return _formatting.format(format, values, None) - else: - if hasattr(values, 'keys'): - return _formatting.format(format, (values,), values) - else: - return _formatting.format(format, (values,), None) - -mod__String_ANY = gateway.app2interp(app_mod__String_ANY) - - -def app_str_decode__String_ANY_ANY(str, encoding=None, errors=None): - if encoding is None and errors is None: - return unicode(str) - elif errors is None: - return unicode(str, encoding) - else: - return unicode(str, encoding, errors) - - -str_decode__String_ANY_ANY = gateway.app2interp(app_str_decode__String_ANY_ANY) - -# register all methods -from pypy.objspace.std import stringtype -register_all(vars(), stringtype) diff --git a/pypy/objspace/std/stringtype.py b/pypy/objspace/std/stringtype.py deleted file mode 100644 index b0c32f5f25..0000000000 --- a/pypy/objspace/std/stringtype.py +++ /dev/null @@ -1,60 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.basestringtype import basestring_typedef - - -str_join = MultiMethod('join', 2) -str_split = MultiMethod('split', 3, defaults=(None,-1)) -str_isdigit = MultiMethod('isdigit', 1) -str_isalpha = MultiMethod('isalpha', 1) -str_isspace = MultiMethod('isspace', 1) -str_isupper = 
MultiMethod('isupper', 1) -str_islower = MultiMethod('islower', 1) -str_istitle = MultiMethod('istitle', 1) -str_isalnum = MultiMethod('isalnum', 1) -str_ljust = MultiMethod('ljust', 2) -str_rjust = MultiMethod('rjust', 2) -str_upper = MultiMethod('upper', 1) -str_lower = MultiMethod('lower', 1) -str_swapcase = MultiMethod('swapcase', 1) -str_capitalize = MultiMethod('capitalize', 1) -str_title = MultiMethod('title', 1) -str_find = MultiMethod('find', 4, defaults=(None, None)) -str_rfind = MultiMethod('rfind', 4, defaults=(None, None)) -str_index = MultiMethod('index', 4, defaults=(None, None)) -str_rindex = MultiMethod('rindex', 4, defaults=(None, None)) -str_replace = MultiMethod('replace', 4, defaults=(-1,)) -str_zfill = MultiMethod('zfill', 2) -str_strip = MultiMethod('strip', 2, defaults=('', ' ')) -str_rstrip = MultiMethod('rstrip', 2, defaults=('', ' ')) -str_lstrip = MultiMethod('lstrip', 2, defaults=('', ' ')) -str_center = MultiMethod('center', 2, ) -str_count = MultiMethod('count', 4, defaults=(None,None)) -str_endswith = MultiMethod('endswith', 2) #[optional arguments not supported now] -str_expandtabs = MultiMethod('expandtabs', 2, defaults=(8,)) -str_splitlines = MultiMethod('splitlines', 2, defaults=(0,)) -str_startswith = MultiMethod('startswith', 2) #[optional arguments not supported now] -str_translate = MultiMethod('translate', 3, defaults=('',)) #unicode mimic not supported now -str_decode = MultiMethod('decode', 3, defaults=(None, None)) - -# ____________________________________________________________ - -def descr__new__(space, w_stringtype, w_obj=None): - from pypy.objspace.std.stringobject import W_StringObject - if w_obj is None: - value = '' - else: - w_obj = space.str(w_obj) - if space.is_true(space.is_(w_stringtype, space.w_str)): - return w_obj # XXX might be reworked when space.str() typechecks - value = space.unwrap(w_obj) - assert isinstance(value, str) # XXX should be checked by space.str() - w_obj = 
def strip_spaces(s):
    """Strip ASCII whitespace from both ends of s."""
    # XXX this is not locale-dependent
    left = 0
    right = len(s)
    while left < right and s[left] in ' \f\n\r\t\v':
        left += 1
    while left < right and s[right - 1] in ' \f\n\r\t\v':
        right -= 1
    return s[left:right]


class InvalidLiteral(Exception):
    """Internal marker: the literal cannot be parsed in the given base."""


def _parse_string(s, literal, base, fname):
    # internal utility for string_to_int() and string_to_long()
    sign = 1
    if s.startswith('-'):
        sign = -1
        s = s[1:]
    elif s.startswith('+'):
        s = s[1:]

    if base == 0:
        # guess the base from the prefix, C-style
        if s.startswith('0x') or s.startswith('0X'):
            base = 16
        elif s.startswith('0'):
            base = 8
        else:
            base = 10
    elif base < 2 or base > 36:
        raise ValueError("%s() base must be >= 2 and <= 36" % (fname,))

    try:
        if not s:
            raise InvalidLiteral
        if base == 16 and (s.startswith('0x') or s.startswith('0X')):
            s = s[2:]
        # XXX uses int-to-long overflow so far
        result = 0
        for c in s:
            digit = ord(c)
            if '0' <= c <= '9':
                digit -= ord('0')
            elif 'A' <= c <= 'Z':
                digit = digit - ord('A') + 10
            elif 'a' <= c <= 'z':
                digit = digit - ord('a') + 10
            else:
                raise InvalidLiteral
            if digit >= base:
                raise InvalidLiteral
            result = result * base + digit
        return result * sign
    except InvalidLiteral:
        if literal:
            raise ValueError('invalid literal for %s(): %s' % (fname, literal))
        else:
            raise ValueError('empty literal for %s()' % (fname,))


def string_to_int(s, base=10):
    """Utility to converts a string to an integer (or possibly a long).
    If base is 0, the proper base is guessed based on the leading
    characters of 's'.  Raises ValueError in case of error.
    """
    s = literal = strip_spaces(s)
    return _parse_string(s, literal, base, 'int')


def string_to_long(s, base=10):
    """As string_to_int(), but ignores an optional 'l' or 'L' suffix."""
    s = literal = strip_spaces(s)
    if (s.endswith('l') or s.endswith('L')) and base < 22:
        # in base 22 and above, 'L' is a valid digit!  try: long('L',22)
        s = s[:-1]
    return long(_parse_string(s, literal, base, 'long'))
try: long('L',22) - s = s[:-1] - return long(_parse_string(s, literal, base, 'long')) diff --git a/pypy/objspace/std/test/__init__.py b/pypy/objspace/std/test/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/objspace/std/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/objspace/std/test/autopath.py b/pypy/objspace/std/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/objspace/std/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/objspace/std/test/broken_test_floatobject.py b/pypy/objspace/std/test/broken_test_floatobject.py deleted file mode 100644 index aebbcae63f..0000000000 --- a/pypy/objspace/std/test/broken_test_floatobject.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python -import unittest -import sys -import os - -print "not working, rewrite this test!" 
-sys.exit(0) - -#Start HACK - -####################################### -# Workaround to give the modules -# an "objectspace" -####################################### - -import pypy.objspace -thisdir = os.getcwd() -syspath = sys.path -sys.path.insert(0,thisdir) -sys.path.append('..') - - -####################################### -# import the module you want to test -# import yourmodule -####################################### - -os.chdir('..') -import floatobject as fl -os.chdir(thisdir) - -# End HACK - -True,False = (1==1),(1==0) - -class TestW_FloatObject(unittest.TestCase): - - def setUp(self): - self.space = objspace.StdObjSpace - - def tearDown(self): - pass - - def test_float(self): - f1 = fl.W_FloatObject(1.0) - result = fl.float_float(self.space,f1) - assert result == f1 - - def test_repr(self): - x = 1.0 - f1 = fl.W_FloatObject(x) - result = fl.float_repr(self.space,f1) - assert self.space.unwrap(result) == repr(x) - - def test_str(self): - x = 1.0 - f1 = fl.W_FloatObject(x) - result = fl.float_str(self.space,f1) - assert self.space.unwrap(result) == str(x) - - def test_hash(self): - x = 1.0 - f1 = fl.W_FloatObject(x) - result = fl.float_hash(self.space,f1) - assert self.space.unwrap(result) == hash(x) - - def test_add(self): - f1 = fl.W_FloatObject(1.0) - f2 = fl.W_FloatObject(2.0) - result = fl.float_float_add(self.space,f1,f2) - assert result.floatval == 3.0 - - def test_sub(self): - f1 = fl.W_FloatObject(1.0) - f2 = fl.W_FloatObject(2.0) - result = fl.float_float_sub(self.space,f1,f2) - assert result.floatval == -1.0 - - def test_mul(self): - f1 = fl.W_FloatObject(1.0) - f2 = fl.W_FloatObject(2.0) - result = fl.float_float_mul(self.space,f1,f2) - assert result.floatval == 2.0 - - def test_div(self): - f1 = fl.W_FloatObject(1.0) - f2 = fl.W_FloatObject(2.0) - result = fl.float_float_div(self.space,f1,f2) - assert result.floatval == 0.5 - - def test_mod(self): - x = 1.0 - y = 2.0 - f1 = fl.W_FloatObject(x) - f2 = fl.W_FloatObject(y) - v = 
fl.float_float_mod(self.space,f1,f2) - assert v.floatval == x % y - - def test_divmod(self): - x = 1.0 - y = 2.0 - f1 = fl.W_FloatObject(x) - f2 = fl.W_FloatObject(y) - wrappedTuple = fl.float_float_divmod(self.space,f1,f2) - v,w = self.space.unwrap(wrappedTuple) - assert (v.floatval,w.floatval) == divmod(x,y) - - def test_pow(self): - x = 1.0 - y = 2.0 - f1 = fl.W_FloatObject(x) - f2 = fl.W_FloatObject(y) - v = fl.float_float_pow(self.space,f1,f2) - assert v.floatval == x ** y - - -if __name__ == '__main__': - unittest.main() diff --git a/pypy/objspace/std/test/broken_test_template.py b/pypy/objspace/std/test/broken_test_template.py deleted file mode 100644 index ca1e16f9e9..0000000000 --- a/pypy/objspace/std/test/broken_test_template.py +++ /dev/null @@ -1,35 +0,0 @@ -import sys -print "not working, do not run nor use this!" -sys.exit(0) - -import objspace -thisdir = os.getcwd() -syspath = sys.path -sys.path.insert(0,thisdir) -sys.path.append('..') - - -####################################### -# import the module you want to test -# import yourmodule -####################################### - -os.chdir('..') -#import your_object_class -os.chdir(thisdir) - -# End HACK -class TestYourObjectClass(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_test1(self): - pass - - -if __name__ == '__main__': - unittest.main() diff --git a/pypy/objspace/std/test/test_boolobject.py b/pypy/objspace/std/test/test_boolobject.py deleted file mode 100644 index 79cc3f6b48..0000000000 --- a/pypy/objspace/std/test/test_boolobject.py +++ /dev/null @@ -1,33 +0,0 @@ -import autopath - - -objspacename = 'std' - -class TestW_BoolObject: - - def setup_method(self,method): - self.true = self.space.w_True - self.false = self.space.w_False - self.wrap = self.space.wrap - - def test_repr(self): - assert self.space.eq_w(self.space.repr(self.true), self.wrap("True")) - assert self.space.eq_w(self.space.repr(self.false), self.wrap("False")) - - def 
test_true(self): - assert self.space.is_true(self.true) - - def test_false(self): - assert not self.space.is_true(self.false) - -class AppTestAppBoolTest: - def test_bool_callable(self): - assert True == bool(1) - assert False == bool(0) - assert False == bool() - - def test_bool_string(self): - assert "True" == str(True) - assert "False" == str(False) - assert "True" == repr(True) - assert "False" == repr(False) diff --git a/pypy/objspace/std/test/test_dictobject.py b/pypy/objspace/std/test/test_dictobject.py deleted file mode 100644 index 9471daac92..0000000000 --- a/pypy/objspace/std/test/test_dictobject.py +++ /dev/null @@ -1,327 +0,0 @@ -import autopath -from pypy.objspace.std.dictobject import W_DictObject - - -objspacename = 'std' - -class TestW_DictObject: - - def test_empty(self): - space = self.space - d = W_DictObject(space, []) - assert not self.space.is_true(d) - - def test_nonempty(self): - space = self.space - wNone = space.w_None - d = W_DictObject(space, [(wNone, wNone)]) - assert space.is_true(d) - i = space.getitem(d, wNone) - equal = space.eq(i, wNone) - assert space.is_true(equal) - - def test_setitem(self): - space = self.space - wk1 = space.wrap('key') - wone = space.wrap(1) - d = W_DictObject(space, [(space.wrap('zero'),space.wrap(0))]) - space.setitem(d,wk1,wone) - wback = space.getitem(d,wk1) - assert self.space.eq_w(wback,wone) - - def test_delitem(self): - space = self.space - wk1 = space.wrap('key') - d = W_DictObject(space, - [(space.wrap('zero'),space.wrap(0)), - (space.wrap('one'),space.wrap(1)), - (space.wrap('two'),space.wrap(2))]) - space.delitem(d,space.wrap('one')) - assert self.space.eq_w(space.getitem(d,space.wrap('zero')),space.wrap(0)) - assert self.space.eq_w(space.getitem(d,space.wrap('two')),space.wrap(2)) - self.space.raises_w(self.space.w_KeyError, - space.getitem,d,space.wrap('one')) - - def test_wrap_dict(self): - assert isinstance(self.space.wrap({}), W_DictObject) - - - def test_dict_compare(self): - w = 
self.space.wrap - w0, w1, w2, w3 = map(w, range(4)) - wd1 = self.space.newdict([(w0, w1), (w2, w3)]) - wd2 = self.space.newdict([(w2, w3), (w0, w1)]) - assert self.space.eq_w(wd1, wd2) - wd3 = self.space.newdict([(w2, w2), (w0, w1)]) - assert not self.space.eq_w(wd1, wd3) - wd4 = self.space.newdict([(w3, w3), (w0, w1)]) - assert not self.space.eq_w(wd1, wd4) - wd5 = self.space.newdict([(w3, w3)]) - assert not self.space.eq_w(wd1, wd4) - - def test_dict_call(self): - space = self.space - w = space.wrap - wd = space.newdict - def mydict(w_args=w(()), w_kwds=w({})): - return space.call(space.w_dict, w_args, w_kwds) - def deepwrap(lp): - return [[w(a),w(b)] for a,b in lp] - d = mydict() - assert self.space.eq_w(d, w({})) - args = w(([['a',2],[23,45]],)) - d = mydict(args) - assert self.space.eq_w(d, wd(deepwrap([['a',2],[23,45]]))) - d = mydict(args, w({'a':33, 'b':44})) - assert self.space.eq_w(d, wd(deepwrap([['a',33],['b',44],[23,45]]))) - d = mydict(w_kwds=w({'a':33, 'b':44})) - assert self.space.eq_w(d, wd(deepwrap([['a',33],['b',44]]))) - self.space.raises_w(space.w_TypeError, mydict, w((23,))) - self.space.raises_w(space.w_ValueError, mydict, w(([[1,2,3]],))) - - def test_dict_pop(self): - space = self.space - w = space.wrap - wd = space.newdict - def mydict(w_args=w(()), w_kwds=w({})): - return space.call(space.w_dict, w_args, w_kwds) - d = mydict(w_kwds=w({"1":2, "3":4})) - dd = mydict(w_kwds=w({"1":2, "3":4})) # means d.copy() - pop = space.getattr(dd, w("pop")) - result = space.call_function(pop, w("1")) - assert self.space.eq_w(result, w(2)) - assert self.space.eq_w(space.len(dd), w(1)) - - dd = mydict(w_kwds=w({"1":2, "3":4})) # means d.copy() - pop = space.getattr(dd, w("pop")) - result = space.call_function(pop, w("1"), w(44)) - assert self.space.eq_w(result, w(2)) - assert self.space.eq_w(space.len(dd), w(1)) - result = space.call_function(pop, w("1"), w(44)) - assert self.space.eq_w(result, w(44)) - assert self.space.eq_w(space.len(dd), w(1)) - - 
self.space.raises_w(space.w_KeyError, space.call_function, pop, w(33)) - - def test_get(self): - space = self.space - w = space.wrap - def mydict(w_args=w(()), w_kwds=w({})): - return space.call(space.w_dict, w_args, w_kwds) - d = mydict(w_kwds=w({"1":2, "3":4})) - get = space.getattr(d, w("get")) - assert self.space.eq_w(space.call_function(get, w("1")), w(2)) - assert self.space.eq_w(space.call_function(get, w("1"), w(44)), w(2)) - assert self.space.eq_w(space.call_function(get, w("33")), w(None)) - assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) - - - -class AppTest_DictObject: - - def test_equality(self): - d = {1:2} - f = {1:2} - assert d == f - assert d != {1:3} - - def test_clear(self): - d = {1:2, 3:4} - d.clear() - assert len(d) == 0 - - def test_copy(self): - d = {1:2, 3:4} - dd = d.copy() - assert d == dd - assert not d is dd - - def tooslow_test_get(self): - d = {1:2, 3:4} - assert d.get(1) == 2 - assert d.get(1,44) == 2 - assert d.get(33) == None - assert d.get(33,44) == 44 - - def tooslow_test_pop(self): - d = {1:2, 3:4} - dd = d.copy() - result = dd.pop(1) - assert result == 2 - assert len(dd) == 1 - dd = d.copy() - result = dd.pop(1, 44) - assert result == 2 - assert len(dd) == 1 - result = dd.pop(1, 44) - assert result == 44 - assert len(dd) == 1 - raises(KeyError, dd.pop, 33) - - def test_has_key(self): - d = {1:2, 3:4} - assert d.has_key(1) - assert not d.has_key(33) - - def test_items(self): - d = {1:2, 3:4} - its = d.items() - its.sort() - assert its == [(1,2),(3,4)] - - def test_iteritems(self): - d = {1:2, 3:4} - dd = d.copy() - for k, v in d.iteritems(): - assert v == dd[k] - del dd[k] - assert not dd - - def test_iterkeys(self): - d = {1:2, 3:4} - dd = d.copy() - for k in d.iterkeys(): - del dd[k] - assert not dd - - def test_itervalues(self): - d = {1:2, 3:4} - values = [] - for k in d.itervalues(): - values.append(k) - assert values == d.values() - - def test_keys(self): - d = {1:2, 3:4} - kys = d.keys() - 
kys.sort() - assert kys == [1,3] - - def test_popitem(self): - d = {1:2, 3:4} - it = d.popitem() - assert len(d) == 1 - assert it==(1,2) or it==(3,4) - it1 = d.popitem() - assert len(d) == 0 - assert (it!=it1) and (it1==(1,2) or it1==(3,4)) - - def test_setdefault(self): - d = {1:2, 3:4} - dd = d.copy() - x = dd.setdefault(1, 99) - assert d == dd - assert x == 2 - x = dd.setdefault(33, 99) - d[33] = 99 - assert d == dd - assert x == 99 - - def test_update(self): - d = {1:2, 3:4} - dd = d.copy() - d.update({}) - assert d == dd - d.update({3:5, 6:7}) - assert d == {1:2, 3:5, 6:7} - - def test_values(self): - d = {1:2, 3:4} - vals = d.values() - vals.sort() - assert vals == [2,4] - - def test_eq(self): - d1 = {1:2, 3:4} - d2 = {1:2, 3:4} - d3 = {1:2} - bool = d1 == d2 - assert bool == True - bool = d1 == d3 - assert bool == False - bool = d1 != d2 - assert bool == False - bool = d1 != d3 - assert bool == True - - def test_lt(self): - d1 = {1:2, 3:4} - d2 = {1:2, 3:4} - d3 = {1:2, 3:5} - d4 = {1:2} - bool = d1 < d2 - assert bool == False - bool = d1 < d3 - assert bool == True - bool = d1 < d4 - assert bool == False - - def test_str_repr(self): - assert '{}' == str({}) - assert '{1: 2}' == str({1: 2}) - assert "{'ba': 'bo'}" == str({'ba': 'bo'}) - # NOTE: the string repr depends on hash values of 1 and 'ba'!!! 
- ok_reprs = ["{1: 2, 'ba': 'bo'}", "{'ba': 'bo', 1: 2}"] - assert str({1: 2, 'ba': 'bo'}) in ok_reprs - assert '{}' == repr({}) - assert '{1: 2}' == repr({1: 2}) - assert "{'ba': 'bo'}" == repr({'ba': 'bo'}) - assert str({1: 2, 'ba': 'bo'}) in ok_reprs - - def test_new(self): - d = dict() - assert d == {} - args = [['a',2], [23,45]] - d = dict(args) - assert d == {'a':2, 23:45} - d = dict(args, a=33, b=44) - assert d == {'a':33, 'b':44, 23:45} - d = dict(a=33, b=44) - assert d == {'a':33, 'b':44} - d = dict({'a':33, 'b':44}) - assert d == {'a':33, 'b':44} - try: d = dict(23) - except (TypeError, ValueError): pass - else: self.fail("dict(23) should raise!") - try: d = dict([[1,2,3]]) - except (TypeError, ValueError): pass - else: self.fail("dict([[1,2,3]]) should raise!") - - def test_fromkeys(self): - assert {}.fromkeys([1, 2], 1) == {1: 1, 2: 1} - assert {}.fromkeys([1, 2]) == {1: None, 2: None} - assert {}.fromkeys([]) == {} - assert {1: 0, 2: 0, 3: 0}.fromkeys([1, '1'], 'j') == ( - {1: 'j', '1': 'j'}) - -# the minimal 'space' needed to use a W_DictObject -class FakeSpace: - def hash(self, obj): - return hash(obj) - def unwrap(self, x): - return x - def is_true(self, x): - return x - def is_(self, x, y): - return x is y - def eq(self, x, y): - return x == y - -from pypy.objspace.std.dictobject import getitem__Dict_ANY, setitem__Dict_ANY_ANY - -class TestDictImplementation: - - def setup_method(self,method): - self.space = FakeSpace() - - def test_stressdict(self): - from random import randint - d = W_DictObject(self.space, []) - N = 10000 - pydict = {} - for i in range(N): - x = randint(-N, N) - setitem__Dict_ANY_ANY(self.space, d, x, i) - pydict[x] = i - for x in pydict: - assert pydict[x] == getitem__Dict_ANY(self.space, d, x) diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py deleted file mode 100644 index 6f41196e32..0000000000 --- a/pypy/objspace/std/test/test_dictproxy.py +++ /dev/null @@ -1,20 +0,0 @@ -import 
autopath - -objspacename = 'std' - -class AppTestUserObject: - def test_dictproxy(self): - class NotEmpty: - a = 1 - assert isinstance(NotEmpty.__dict__, dict) == False - assert 'a' in NotEmpty.__dict__ - assert 'a' in NotEmpty.__dict__.keys() - assert 'b' not in NotEmpty.__dict__ - assert isinstance(NotEmpty.__dict__.copy(), dict) - assert NotEmpty.__dict__ == NotEmpty.__dict__.copy() - try: - NotEmpty.__dict__['b'] = 1 - except: - pass - else: - raise AssertionError, 'this should not have been writable' diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py deleted file mode 100644 index 724bd7befe..0000000000 --- a/pypy/objspace/std/test/test_floatobject.py +++ /dev/null @@ -1,67 +0,0 @@ -import autopath -from pypy.objspace.std import floatobject as fobj -from pypy.objspace.std.objspace import FailedToImplement - -objspacename = 'std' - -class TestW_FloatObject: - - def _unwrap_nonimpl(self, func, *args, **kwds): - """ make sure that the expected exception occurs, and unwrap it """ - try: - res = func(*args, **kwds) - raise Exception, "should have failed but returned '%s'!" 
%repr(res) - except FailedToImplement, arg: - return arg[0] - - def test_pow_fff(self): - x = 10.0 - y = 2.0 - z = 13.0 - f1 = fobj.W_FloatObject(self.space, x) - f2 = fobj.W_FloatObject(self.space, y) - f3 = fobj.W_FloatObject(self.space, z) - assert self.space.w_TypeError == ( - self._unwrap_nonimpl(fobj.pow__Float_Float_ANY, - self.space, f1, f2, f3)) - - def test_pow_ffn(self): - x = 10.0 - y = 2.0 - f1 = fobj.W_FloatObject(self.space, x) - f2 = fobj.W_FloatObject(self.space, y) - v = fobj.pow__Float_Float_ANY(self.space, f1, f2, self.space.w_None) - assert v.floatval == x ** y - f1 = fobj.W_FloatObject(self.space, -1.23) - f2 = fobj.W_FloatObject(self.space, -4.56) - assert self.space.w_ValueError == ( - self._unwrap_nonimpl(fobj.pow__Float_Float_ANY, - self.space, f1, f2, - self.space.w_None)) - -class AppTestAppFloatTest: - def test_negatives(self): - assert -1.1 < 0 - assert -0.1 < 0 - - def test_float_callable(self): - assert 0.125 == float(0.125) - - def test_float_int(self): - assert 42.0 == float(42) - - def test_float_string(self): - assert 42.0 == float("42") - - def test_round(self): - assert 1.0 == round(1.0) - assert 1.0 == round(1.1) - assert 2.0 == round(1.9) - assert 2.0 == round(1.5) - assert -2.0 == round(-1.5) - assert -2.0 == round(-1.5) - assert -2.0 == round(-1.5, 0) - assert -2.0 == round(-1.5, 0) - assert 22.2 == round(22.222222, 1) - assert 20.0 == round(22.22222, -1) - assert 0.0 == round(22.22222, -2) diff --git a/pypy/objspace/std/test/test_instmethobject.py b/pypy/objspace/std/test/test_instmethobject.py deleted file mode 100644 index b9026ad442..0000000000 --- a/pypy/objspace/std/test/test_instmethobject.py +++ /dev/null @@ -1,32 +0,0 @@ -import autopath - -# NB. 
instmethobject.py has been removed, -# but the following tests still make sense - -objspacename = 'std' - -class AppTestInstMethObjectApp: - def test_callBound(self): - boundMethod = [1,2,3].__len__ - assert boundMethod() == 3 - raises(TypeError, boundMethod, 333) - def test_callUnbound(self): - unboundMethod = list.__len__ - assert unboundMethod([1,2,3]) == 3 - raises(TypeError, unboundMethod) - raises(TypeError, unboundMethod, 333) - raises(TypeError, unboundMethod, [1,2,3], 333) - - def test_getBound(self): - def f(l,x): return l[x+1] - bound = f.__get__('abcdef') - assert bound(1) == 'c' - raises(TypeError, bound) - raises(TypeError, bound, 2, 3) - def test_getUnbound(self): - def f(l,x): return l[x+1] - unbound = f.__get__(None, str) - assert unbound('abcdef', 2) == 'd' - raises(TypeError, unbound) - raises(TypeError, unbound, 4) - raises(TypeError, unbound, 4, 5) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py deleted file mode 100644 index 93119ff3bb..0000000000 --- a/pypy/objspace/std/test/test_intobject.py +++ /dev/null @@ -1,316 +0,0 @@ -import sys -import autopath -from pypy.objspace.std import intobject as iobj -from pypy.objspace.std.objspace import FailedToImplement - -objspacename = 'std' - -class TestW_IntObject: - - def _longshiftresult(self, x): - """ calculate an overflowing shift """ - n = 1 - l = long(x) - while 1: - ires = x << n - lres = l << n - if type(ires) is long or lres != ires: - return n - n += 1 - - def _unwrap_nonimpl(self, func, *args, **kwds): - """ make sure that the expected exception occours, and unwrap it """ - try: - res = func(*args, **kwds) - raise Exception, "should have failed but returned '%s'!" 
%repr(res) - except FailedToImplement, arg: - return arg[0] - - def test_repr(self): - x = 1 - f1 = iobj.W_IntObject(self.space, x) - result = iobj.repr__Int(self.space, f1) - assert self.space.unwrap(result) == repr(x) - - def test_str(self): - x = 12345 - f1 = iobj.W_IntObject(self.space, x) - result = iobj.str__Int(self.space, f1) - assert self.space.unwrap(result) == str(x) - - def test_hash(self): - x = 42 - f1 = iobj.W_IntObject(self.space, x) - result = iobj.hash__Int(self.space, f1) - assert result.intval == hash(x) - - def test_compare(self): - import operator - optab = ['lt', 'le', 'eq', 'ne', 'gt', 'ge'] - for x in (-10, -1, 0, 1, 2, 1000, sys.maxint): - for y in (-sys.maxint-1, -11, -9, -2, 0, 1, 3, 1111, sys.maxint): - for op in optab: - wx = iobj.W_IntObject(self.space, x) - wy = iobj.W_IntObject(self.space, y) - res = getattr(operator, op)(x, y) - method = getattr(iobj, '%s__Int_Int' % op) - myres = method(self.space, wx, wy) - assert self.space.unwrap(myres) == res - - def test_add(self): - x = 1 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - result = iobj.add__Int_Int(self.space, f1, f2) - assert result.intval == x+y - x = sys.maxint - y = 1 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.add__Int_Int, self.space, f1, f2)) - - def test_sub(self): - x = 1 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - result = iobj.sub__Int_Int(self.space, f1, f2) - assert result.intval == x-y - x = sys.maxint - y = -1 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.sub__Int_Int, self.space, f1, f2)) - - def test_mul(self): - x = 2 - y = 3 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - result = iobj.mul__Int_Int(self.space, f1, f2) - assert result.intval == 
x*y - x = -sys.maxint-1 - y = -1 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.mul__Int_Int, self.space, f1, f2)) - - def test_div(self): - for i in range(10): - res = i//3 - f1 = iobj.W_IntObject(self.space, i) - f2 = iobj.W_IntObject(self.space, 3) - result = iobj.div__Int_Int(self.space, f1, f2) - assert result.intval == res - x = -sys.maxint-1 - y = -1 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.div__Int_Int, self.space, f1, f2)) - - def test_mod(self): - x = 1 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.mod__Int_Int(self.space, f1, f2) - assert v.intval == x % y - # not that mod cannot overflow - - def test_divmod(self): - x = 1 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - ret = iobj.divmod__Int_Int(self.space, f1, f2) - v, w = self.space.unwrap(ret) - assert (v, w) == divmod(x, y) - x = -sys.maxint-1 - y = -1 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.divmod__Int_Int, self.space, f1, f2)) - - def test_pow_iii(self): - x = 10 - y = 2 - z = 13 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - f3 = iobj.W_IntObject(self.space, z) - v = iobj.pow__Int_Int_Int(self.space, f1, f2, f3) - assert v.intval == pow(x, y, z) - f1, f2, f3 = [iobj.W_IntObject(self.space, i) for i in (10, -1, 42)] - self.space.raises_w(self.space.w_TypeError, - iobj.pow__Int_Int_Int, - self.space, f1, f2, f3) - f1, f2, f3 = [iobj.W_IntObject(self.space, i) for i in (10, 5, 0)] - self.space.raises_w(self.space.w_ValueError, - iobj.pow__Int_Int_Int, - self.space, f1, f2, f3) - - def test_pow_iin(self): - x = 10 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = 
iobj.W_IntObject(self.space, y) - v = iobj.pow__Int_Int_None(self.space, f1, f2, self.space.w_None) - assert v.intval == x ** y - f1, f2 = [iobj.W_IntObject(self.space, i) for i in (10, 20)] - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.pow__Int_Int_None, self.space, f1, f2, self.space.w_None)) - - def test_neg(self): - x = 42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.neg__Int(self.space, f1) - assert v.intval == -x - x = -sys.maxint-1 - f1 = iobj.W_IntObject(self.space, x) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.neg__Int, self.space, f1)) - - def test_pos(self): - x = 42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.pos__Int(self.space, f1) - assert v.intval == +x - x = -42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.pos__Int(self.space, f1) - assert v.intval == +x - - def test_abs(self): - x = 42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.abs__Int(self.space, f1) - assert v.intval == abs(x) - x = -42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.abs__Int(self.space, f1) - assert v.intval == abs(x) - x = -sys.maxint-1 - f1 = iobj.W_IntObject(self.space, x) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.abs__Int, self.space, f1)) - - def test_invert(self): - x = 42 - f1 = iobj.W_IntObject(self.space, x) - v = iobj.invert__Int(self.space, f1) - assert v.intval == ~x - - def test_lshift(self): - x = 12345678 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.lshift__Int_Int(self.space, f1, f2) - assert v.intval == x << y - y = self._longshiftresult(x) - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - assert self.space.w_OverflowError == ( - self._unwrap_nonimpl(iobj.lshift__Int_Int, self.space, f1, f2)) - - def test_rshift(self): - x = 12345678 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.rshift__Int_Int(self.space, f1, f2) - assert 
v.intval == x >> y - - def test_and(self): - x = 12345678 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.and__Int_Int(self.space, f1, f2) - assert v.intval == x & y - - def test_xor(self): - x = 12345678 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.xor__Int_Int(self.space, f1, f2) - assert v.intval == x ^ y - - def test_or(self): - x = 12345678 - y = 2 - f1 = iobj.W_IntObject(self.space, x) - f2 = iobj.W_IntObject(self.space, y) - v = iobj.or__Int_Int(self.space, f1, f2) - assert v.intval == x | y - - def test_int(self): - f1 = iobj.W_IntObject(self.space, 1) - result = iobj.int__Int(self.space, f1) - assert result == f1 - -## def test_long(self): -## x = 1 -## f1 = iobj.W_IntObject(self.space, x) -## result = iobj.int_long(self.space, f1) -## self.assertEquals(self.space.unwrap(result), long(x)) - -## def test_float(self): -## x = 1 -## f1 = iobj.W_IntObject(self.space, x) -## result = iobj.int_float(self.space, f1) -## self.assertEquals(self.space.unwrap(result), float(x)) - - def test_oct(self): - x = 012345 - f1 = iobj.W_IntObject(self.space, x) - result = iobj.oct__Int(self.space, f1) - assert self.space.unwrap(result) == oct(x) - - def test_hex(self): - x = 0x12345 - f1 = iobj.W_IntObject(self.space, x) - result = iobj.hex__Int(self.space, f1) - assert self.space.unwrap(result) == hex(x) - -class AppTestInt: - - def test_int_callable(self): - assert 43 == int(43) - - def test_int_string(self): - assert 42 == int("42") - assert 10000000000 == long("10000000000") - - def test_int_float(self): - assert 4 == int(4.2) - - def test_int_str_repr(self): - assert "42" == str(42) - assert "42" == repr(42) - raises(ValueError, int, '0x2A') - - def test_int_two_param(self): - assert 42 == int('0x2A', 0) - assert 42 == int('2A', 16) - assert 42 == int('42', 10) - raises(TypeError, int, 1, 10) - - def test_shift_zeros(self): - assert (1 << 0) == 1 - assert (1 >> 0) == 1 - 
- def test_overflow(self): - import sys - n = sys.maxint + 1 - assert isinstance(n, long) - - def test_pow(self): - assert pow(2, -10) == 1/1024. diff --git a/pypy/objspace/std/test/test_iterobject.py b/pypy/objspace/std/test/test_iterobject.py deleted file mode 100644 index 725413cd53..0000000000 --- a/pypy/objspace/std/test/test_iterobject.py +++ /dev/null @@ -1,62 +0,0 @@ -import autopath -from pypy.objspace.std.iterobject import W_SeqIterObject -from pypy.interpreter.error import OperationError - -objspacename = 'std' - -class TestW_IterObject: - - def body3(self, w_iter): - w = self.space.wrap - assert self.space.eq_w(self.space.next(w_iter), w(5)) - assert self.space.eq_w(self.space.next(w_iter), w(3)) - assert self.space.eq_w(self.space.next(w_iter), w(99)) - self.body0(w_iter) - - def body0(self, w_iter): - raises(OperationError, self.space.next, w_iter) - raises(OperationError, self.space.next, w_iter) - - def test_iter(self): - w = self.space.wrap - w_tuple = self.space.newtuple([w(5), w(3), w(99)]) - w_iter = W_SeqIterObject(self.space, w_tuple) - self.body3(w_iter) - - def test_iter_builtin(self): - w = self.space.wrap - w_tuple = self.space.newtuple([w(5), w(3), w(99)]) - w_iter = self.space.iter(w_tuple) - self.body3(w_iter) - - def test_emptyiter(self): - w_list = self.space.newlist([]) - w_iter = W_SeqIterObject(self.space, w_list) - self.body0(w_iter) - - def test_emptyiter_builtin(self): - w_list = self.space.newlist([]) - w_iter = self.space.iter(w_list) - self.body0(w_iter) - -class AppTestW_IterObjectApp: - def test_user_iter(self): - class C: - def next(self): - raise StopIteration - def __iter__(self): - return self - assert list(C()) == [] - - def test_iter_getitem(self): - class C: - def __getitem__(self, i): - return range(2)[i] - assert list(C()) == range(2) - - def test_iter_fail_noseq(self): - class C: - pass - raises(TypeError, - iter, - C()) diff --git a/pypy/objspace/std/test/test_listobject.py 
b/pypy/objspace/std/test/test_listobject.py deleted file mode 100644 index e0009b58e0..0000000000 --- a/pypy/objspace/std/test/test_listobject.py +++ /dev/null @@ -1,338 +0,0 @@ -#from __future__ import nested_scopes -import autopath -from pypy.objspace.std.listobject import W_ListObject -from pypy.interpreter.error import OperationError - - -objspacename = 'std' - -class TestW_ListObject: - - def test_is_true(self): - w = self.space.wrap - w_list = W_ListObject(self.space, []) - assert self.space.is_true(w_list) == False - w_list = W_ListObject(self.space, [w(5)]) - assert self.space.is_true(w_list) == True - w_list = W_ListObject(self.space, [w(5), w(3)]) - assert self.space.is_true(w_list) == True - - def test_len(self): - w = self.space.wrap - w_list = W_ListObject(self.space, []) - assert self.space.eq_w(self.space.len(w_list), w(0)) - w_list = W_ListObject(self.space, [w(5)]) - assert self.space.eq_w(self.space.len(w_list), w(1)) - w_list = W_ListObject(self.space, [w(5), w(3), w(99)]*111) - assert self.space.eq_w(self.space.len(w_list), w(333)) - - def test_getitem(self): - w = self.space.wrap - w_list = W_ListObject(self.space, [w(5), w(3)]) - assert self.space.eq_w(self.space.getitem(w_list, w(0)), w(5)) - assert self.space.eq_w(self.space.getitem(w_list, w(1)), w(3)) - assert self.space.eq_w(self.space.getitem(w_list, w(-2)), w(5)) - assert self.space.eq_w(self.space.getitem(w_list, w(-1)), w(3)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_list, w(2)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_list, w(42)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_list, w(-3)) - - def test_iter(self): - w = self.space.wrap - w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_iter = self.space.iter(w_list) - assert self.space.eq_w(self.space.next(w_iter), w(5)) - assert self.space.eq_w(self.space.next(w_iter), w(3)) - assert self.space.eq_w(self.space.next(w_iter), w(99)) - 
raises(OperationError, self.space.next, w_iter) - raises(OperationError, self.space.next, w_iter) - - def test_contains(self): - w = self.space.wrap - w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) - assert self.space.eq_w(self.space.contains(w_list, w(5)), - self.space.w_True) - assert self.space.eq_w(self.space.contains(w_list, w(99)), - self.space.w_True) - assert self.space.eq_w(self.space.contains(w_list, w(11)), - self.space.w_False) - assert self.space.eq_w(self.space.contains(w_list, w_list), - self.space.w_False) - - def test_getslice(self): - w = self.space.wrap - - def test1(testlist, start, stop, step, expected): - w_slice = self.space.newslice(w(start), w(stop), w(step)) - w_list = W_ListObject(self.space, [w(i) for i in testlist]) - w_result = self.space.getitem(w_list, w_slice) - assert self.space.unwrap(w_result) == expected - - for testlist in [[], [5,3,99]]: - for start in [-2, 0, 1, 10]: - for end in [-1, 2, 999]: - test1(testlist, start, end, 1, testlist[start:end]) - - test1([5,7,1,4], 3, 1, -2, [4,]) - test1([5,7,1,4], 3, 0, -2, [4, 7]) - test1([5,7,1,4], 3, -1, -2, []) - test1([5,7,1,4], -2, 11, 2, [1,]) - test1([5,7,1,4], -3, 11, 2, [7, 4]) - test1([5,7,1,4], -5, 11, 2, [5, 1]) - - def test_setslice(self): - w = self.space.wrap - - def test1(lhslist, start, stop, rhslist, expected): - w_slice = self.space.newslice(w(start), w(stop), w(1)) - w_lhslist = W_ListObject(self.space, [w(i) for i in lhslist]) - w_rhslist = W_ListObject(self.space, [w(i) for i in rhslist]) - self.space.setitem(w_lhslist, w_slice, w_rhslist) - assert self.space.unwrap(w_lhslist) == expected - - - test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4]) - test1([5,7,1,4], 1, 3, [9], [5,9,4]) - test1([5,7,1,4], 1, 3, [9,8,6],[5,9,8,6,4]) - test1([5,7,1,4], 1, 3, [], [5,4]) - test1([5,7,1,4], 2, 2, [9], [5,7,9,1,4]) - test1([5,7,1,4], 0, 99,[9,8], [9,8]) - - def test_add(self): - w = self.space.wrap - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, 
[w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(-7)] * 111) - assert self.space.eq_w(self.space.add(w_list1, w_list1), - W_ListObject(self.space, [w(5), w(3), w(99), - w(5), w(3), w(99)])) - assert self.space.eq_w(self.space.add(w_list1, w_list2), - W_ListObject(self.space, [w(5), w(3), w(99)] + - [w(-7)] * 111)) - assert self.space.eq_w(self.space.add(w_list1, w_list0), w_list1) - assert self.space.eq_w(self.space.add(w_list0, w_list2), w_list2) - - def test_mul(self): - # only testing right mul at the moment - w = self.space.wrap - arg = w(2) - n = 3 - w_lis = W_ListObject(self.space, [arg]) - w_lis3 = W_ListObject(self.space, [arg]*n) - w_res = self.space.mul(w_lis, w(n)) - assert self.space.eq_w(w_lis3, w_res) - # commute - w_res = self.space.mul(w(n), w_lis) - assert self.space.eq_w(w_lis3, w_res) - - def test_setitem(self): - w = self.space.wrap - w_list = W_ListObject(self.space, [w(5), w(3)]) - w_exp1 = W_ListObject(self.space, [w(5), w(7)]) - w_exp2 = W_ListObject(self.space, [w(8), w(7)]) - self.space.setitem(w_list, w(1), w(7)) - assert self.space.eq_w(w_exp1, w_list) - self.space.setitem(w_list, w(-2), w(8)) - assert self.space.eq_w(w_exp2, w_list) - self.space.raises_w(self.space.w_IndexError, - self.space.setitem, w_list, w(2), w(5)) - self.space.raises_w(self.space.w_IndexError, - self.space.setitem, w_list, w(-3), w(5)) - - def test_eq(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - - assert self.space.eq_w(self.space.eq(w_list0, w_list1), - self.space.w_False) - assert self.space.eq_w(self.space.eq(w_list1, w_list0), - self.space.w_False) - assert self.space.eq_w(self.space.eq(w_list1, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.eq(w_list1, w_list2), - self.space.w_True) - assert 
self.space.eq_w(self.space.eq(w_list2, w_list3), - self.space.w_False) - def test_ne(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - - assert self.space.eq_w(self.space.ne(w_list0, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.ne(w_list1, w_list0), - self.space.w_True) - assert self.space.eq_w(self.space.ne(w_list1, w_list1), - self.space.w_False) - assert self.space.eq_w(self.space.ne(w_list1, w_list2), - self.space.w_False) - assert self.space.eq_w(self.space.ne(w_list2, w_list3), - self.space.w_True) - def test_lt(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.lt(w_list0, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.lt(w_list1, w_list0), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_list1, w_list1), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_list1, w_list2), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_list2, w_list3), - self.space.w_True) - assert self.space.eq_w(self.space.lt(w_list4, w_list3), - self.space.w_True) - - def test_ge(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.ge(w_list0, w_list1), - self.space.w_False) - assert 
self.space.eq_w(self.space.ge(w_list1, w_list0), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_list1, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_list1, w_list2), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_list2, w_list3), - self.space.w_False) - assert self.space.eq_w(self.space.ge(w_list4, w_list3), - self.space.w_False) - - def test_gt(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.gt(w_list0, w_list1), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_list1, w_list0), - self.space.w_True) - assert self.space.eq_w(self.space.gt(w_list1, w_list1), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_list1, w_list2), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_list2, w_list3), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_list4, w_list3), - self.space.w_False) - - def test_le(self): - w = self.space.wrap - - w_list0 = W_ListObject(self.space, []) - w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) - w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.le(w_list0, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_list1, w_list0), - self.space.w_False) - assert self.space.eq_w(self.space.le(w_list1, w_list1), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_list1, w_list2), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_list2, w_list3), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_list4, w_list3), - 
self.space.w_True) - -class AppTestW_ListObject: - def test_explicit_new_init(self): - l = l0 = list.__new__(list) - l.__init__([1,2]) - assert l is l0 - assert l ==[1,2] - list.__init__(l,[1,2,3]) - assert l is l0 - assert l ==[1,2,3] - - def test_extend_list(self): - l = l0 = [1] - l.extend([2]) - assert l is l0 - assert l == [1,2] - - def test_extend_tuple(self): - l = l0 = [1] - l.extend((2,)) - assert l is l0 - assert l == [1,2] - - def test_sort(self): - l = l0 = [1, 5, 3, 0] - l.sort() - assert l is l0 - assert l == [0, 1, 3, 5] - l = l0 = [] - l.sort() - assert l is l0 - assert l == [] - l = l0 = [1] - l.sort() - assert l is l0 - assert l == [1] - - def test_sort_cmp(self): - def lencmp(a,b): return cmp(len(a), len(b)) - l = [ 'a', 'fiver', 'tre', '' ] - l.sort(lencmp) - assert l == ['', 'a', 'tre', 'fiver'] - l = [] - l.sort(lencmp) - assert l == [] - l = [ 'a' ] - l.sort(lencmp) - assert l == [ 'a' ] - - def test_extended_slice(self): - l = range(10) - del l[::2] - assert l ==[1,3,5,7,9] - l[-2::-1] = l[:-1] - assert l ==[7,5,3,1,9] - del l[-1:2:-1] - assert l ==[7,5,3] - del l[:2] - assert l ==[3] - - def test_delall(self): - l = l0 = [1,2,3] - del l[:] - assert l is l0 - assert l == [] - - def test_iadd(self): - l = l0 = [1,2,3] - l += [4,5] - assert l is l0 - assert l == [1,2,3,4,5] diff --git a/pypy/objspace/std/test/test_multimethod.py b/pypy/objspace/std/test/test_multimethod.py deleted file mode 100644 index 4fe246a884..0000000000 --- a/pypy/objspace/std/test/test_multimethod.py +++ /dev/null @@ -1,145 +0,0 @@ -import autopath - -from pypy.objspace.std.multimethod import * - -BoundMultiMethod.ASSERT_BASE_TYPE = object - - -objspacename = 'std' - -class X: - def __init__(self, value): - self.value = value - def __repr__(self): - return '<X %r>' % self.value - -def from_y_to_x(space, yinstance): - return X(yinstance) - -from_y_to_x.result_class = X -from_y_to_x.priority = 2 - -def from_x_to_str(space, xinstance): - #if xinstance.value: - return w('!' 
+ repr(xinstance.value)) - #else: - # return [] - -from_x_to_str.result_class = str -from_x_to_str.priority = 2 - - -class Y: - def __init__(self, value): - self.value = value - def __repr__(self): - return '<Y %r>' % self.value - def __nonzero__(self): - return self.value != 666 - - -def add_x_x(space, x1, x2): - return "add_x_x", x1, x2 - -def add_x_y(space, x1, y2): - if x1.value < 0: - raise FailedToImplement(ValueError, 'not good') - return "add_x_y", x1, y2 - -def add_y_y(space, y1, y2): - return "add_y_y", y1, y2 - -def add_string_string(space, x, y): - return "add_string_string", x, y - -def add_int_string(space, x, y): - return "add_int_string", x, y - -def add_int_any(space, y1, o2): - return "add_int_any", y1, o2 - -class FakeObjSpace: - add = MultiMethod('+', 2, []) - add.register(add_x_x, X, X) - add.register(add_x_y, X, Y) - add.register(add_y_y, Y, Y) - add.register(add_string_string, str, str) - add.register(add_int_string, int, str) - add.register(add_int_any, int, object) - - delegate = DelegateMultiMethod() - delegate.register(from_y_to_x, Y) - delegate.register(from_x_to_str, X) - - def wrap(self, x): - return '<wrapped %r>' % (x,) - w_TypeError = 'w_TypeError' - -##def w(x, cache={}): -## if type(x) in cache: -## Stub = cache[type(x)] -## else: -## Stub = type(type(x))('%s_stub' % type(x).__name__, (type(x),), {}) -## Stub.dispatchclass = Stub -## cache[type(x)] = Stub -## return Stub(x) - -##X.dispatchclass = X -##Y.dispatchclass = Y - -def w(x): - return x - - -class TestMultiMethod: - def setup_method(self,method): - # only run when testing stdobjectspace - #XXX removed: self.space - self.space = FakeObjSpace() - - def test_non_delegate(self): - space = self.space - - r = space.add(X(2), X(5)) - assert repr(r) == "('add_x_x', <X 2>, <X 5>)" - - r = space.add(X(3), Y(4)) - assert repr(r) == "('add_x_y', <X 3>, <Y 4>)" - - r = space.add(Y(0), Y(20)) - assert repr(r) == "('add_y_y', <Y 0>, <Y 20>)" - - r = space.add(w(-3), w([7,6,5])) - assert 
repr(r) == "('add_int_any', -3, [7, 6, 5])" - - r = space.add(w(5), w("test")) - assert repr(r) == "('add_int_string', 5, 'test')" - - r = space.add(w("x"), w("y")) - assert repr(r) == "('add_string_string', 'x', 'y')" - - def test_delegate_y_to_x(self): - space = self.space - r = space.add(Y(-1), X(7)) - assert repr(r) == "('add_x_x', <X <Y -1>>, <X 7>)" - - r = space.add(Y(1), X(7)) - assert repr(r) == "('add_x_x', <X <Y 1>>, <X 7>)" - - r = space.add(X(-3), Y(20)) - assert repr(r) == "('add_x_x', <X -3>, <X <Y 20>>)" - - def test_no_operation_defined(self): - space = self.space - raises(OperationError, space.add, w([3]), w(4)) - raises(OperationError, space.add, w(3.0), w('bla')) - #raises(OperationError, space.add, X(0), w("spam")) - #raises(OperationError, space.add, Y(666), w("egg")) - - def test_delegate_x_to_str(self): - space = self.space - r = space.add(X(42), w("spam")) - assert repr(r) == "('add_string_string', '!42', 'spam')" - - r = space.add(Y(20), w("egg")) - assert repr(r) == "('add_string_string', '!<Y 20>', 'egg')" diff --git a/pypy/objspace/std/test/test_noneobject.py b/pypy/objspace/std/test/test_noneobject.py deleted file mode 100644 index 09fc569b99..0000000000 --- a/pypy/objspace/std/test/test_noneobject.py +++ /dev/null @@ -1,16 +0,0 @@ -import autopath - - -objspacename = 'std' - -class TestW_NoneObject: - - def test_equality(self): - assert self.space.eq_w(self.space.w_None, self.space.w_None) - - def test_inequality(self): - neresult = self.space.ne(self.space.w_None, self.space.w_None) - assert not self.space.is_true(neresult) - - def test_false(self): - assert not self.space.is_true(self.space.w_None) diff --git a/pypy/objspace/std/test/test_restricted_int.py b/pypy/objspace/std/test/test_restricted_int.py deleted file mode 100644 index 3fb3090be4..0000000000 --- a/pypy/objspace/std/test/test_restricted_int.py +++ /dev/null @@ -1,128 +0,0 @@ -import unittest -import autopath -from pypy.objspace.std.restricted_int import * -import sys - 
-maxint_mask = (sys.maxint*2 + 1) - -objspacename = 'std' - -class Test_r_int: - - def setup_method(self,method): - space = self.space - - def test__add__(self): - self.binary_test(lambda x, y: x + y) - def test__sub__(self): - self.binary_test(lambda x, y: x - y) - def test__mul__(self): - self.binary_test(lambda x, y: x * y) - x = 3; y = [2] - assert x*y == r_int(x)*y - assert y*x == y*r_int(x) - def test__div__(self): - self.binary_test(lambda x, y: x // y) - def test__mod__(self): - self.binary_test(lambda x, y: x % y) - def test__divmod__(self): - self.binary_test(divmod) - def test__lshift__(self): - self.binary_test(lambda x, y: x << y, (1, 2, 3)) - def test__rshift__(self): - self.binary_test(lambda x, y: x >> y, (1, 2, 3)) - def test__or__(self): - self.binary_test(lambda x, y: x | y) - def test__and__(self): - self.binary_test(lambda x, y: x & y) - def test__xor__(self): - self.binary_test(lambda x, y: x ^ y) - def test__neg__(self): - self.unary_test(lambda x: -x) - def test__pos__(self): - self.unary_test(lambda x: +x) - def test__invert__(self): - self.unary_test(lambda x: ~x) - def test__pow__(self): - self.binary_test(lambda x, y: x**y, (2, 3)) - self.binary_test(lambda x, y: pow(x, y, 42), (2, 3, 5, 1000)) - - def unary_test(self, f): - for arg in (-10, -1, 0, 3, 12345): - res = f(arg) - cmp = f(r_int(arg)) - assert res == cmp - - def binary_test(self, f, rargs = None): - if not rargs: - rargs = (-10, -1, 3, 55) - for larg in (-10, -1, 0, 3, 1234): - for rarg in rargs: - for types in ((int, r_int), (r_int, int), (r_int, r_int)): - res = f(larg, rarg) - left, right = types - cmp = f(left(larg), right(rarg)) - assert res == cmp - -class Test_r_uint: - - def setup_method(self,method): - space = self.space - - def test__add__(self): - self.binary_test(lambda x, y: x + y) - def test__sub__(self): - self.binary_test(lambda x, y: x - y) - def test__mul__(self): - self.binary_test(lambda x, y: x * y) - x = 3; y = [2] - assert x*y == r_uint(x)*y - assert y*x 
== y*r_uint(x) - def test__div__(self): - self.binary_test(lambda x, y: x // y) - def test__mod__(self): - self.binary_test(lambda x, y: x % y) - def test__divmod__(self): - self.binary_test(divmod) - def test__lshift__(self): - self.binary_test(lambda x, y: x << y, (1, 2, 3)) - def test__rshift__(self): - self.binary_test(lambda x, y: x >> y, (1, 2, 3)) - def test__or__(self): - self.binary_test(lambda x, y: x | y) - def test__and__(self): - self.binary_test(lambda x, y: x & y) - def test__xor__(self): - self.binary_test(lambda x, y: x ^ y) - def test__neg__(self): - self.unary_test(lambda x: -x) - def test__pos__(self): - self.unary_test(lambda x: +x) - def test__invert__(self): - self.unary_test(lambda x: ~x) - def test__pow__(self): - self.binary_test(lambda x, y: x**y, (2, 3)) - # pow is buggy, dowsn't allow our type - #self.binary_test(lambda x, y: pow(x, y, 42), (2, 3, 5, 1000)) - - def unary_test(self, f): - for arg in (0, 3, 12345): - res = f(arg) & maxint_mask - cmp = f(r_uint(arg)) - assert res == cmp - - def binary_test(self, f, rargs = None): - mask = maxint_mask - if not rargs: - rargs = (1, 3, 55) - for larg in (0, 1, 2, 3, 1234): - for rarg in rargs: - for types in ((int, r_uint), (r_uint, int), (r_uint, r_uint)): - res = f(larg, rarg) - left, right = types - cmp = f(left(larg), right(rarg)) - if type(res) is tuple: - res = res[0] & mask, res[1] & mask - else: - res = res & mask - assert res == cmp diff --git a/pypy/objspace/std/test/test_sliceobject.py b/pypy/objspace/std/test/test_sliceobject.py deleted file mode 100644 index 01ffbca8b2..0000000000 --- a/pypy/objspace/std/test/test_sliceobject.py +++ /dev/null @@ -1,51 +0,0 @@ -import autopath - -objspacename = 'std' - -class TestW_SliceObject: - - def equal_indices(self, got, expected): - assert len(got) == len(expected) - for g, e in zip(got, expected): - assert g == e - - def test_indices(self): - from pypy.objspace.std import slicetype - space = self.space - w = space.wrap - w_None = 
space.w_None - w_slice = space.newslice(w_None, w_None, w_None) - self.equal_indices(slicetype.indices3(space, w_slice, 6), (0, 6, 1)) - w_slice = space.newslice(w(0), w(6), w(1)) - self.equal_indices(slicetype.indices3(space, w_slice, 6), (0, 6, 1)) - w_slice = space.newslice(w_None, w_None, w(-1)) - self.equal_indices(slicetype.indices3(space, w_slice, 6), (5, -1, -1)) - - def test_indices_fail(self): - from pypy.objspace.std import slicetype - space = self.space - w = space.wrap - w_None = space.w_None - w_slice = space.newslice(w_None, w_None, w(0)) - self.space.raises_w(space.w_ValueError, - slicetype.indices3, space, w_slice, 10) - -class AppTest_SliceObject: - def test_new(self): - def cmp_slice(sl1, sl2): - for attr in "start", "stop", "step": - if getattr(sl1, attr) != getattr(sl2, attr): - return False - return True - raises(TypeError, slice) - raises(TypeError, slice, 1, 2, 3, 4) - assert cmp_slice(slice(23), slice(None, 23, None)) - assert cmp_slice(slice(23, 45), slice(23, 45, None)) - - def test_indices(self): - assert slice(4,11,2).indices(28) == (4, 11, 2) - assert slice(4,11,2).indices(8) == (4, 8, 2) - assert slice(4,11,2).indices(2) == (2, 2, 2) - assert slice(11,4,-2).indices(28) == (11, 4, -2) - assert slice(11,4,-2).indices(8) == (7, 4, -2) - assert slice(11,4,-2).indices(2) == (1, 2, -2) diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py deleted file mode 100644 index 272a6fbab9..0000000000 --- a/pypy/objspace/std/test/test_stdobjspace.py +++ /dev/null @@ -1,14 +0,0 @@ -import autopath - -objspacename = 'std' - -class TestW_StdObjSpace: - - def test_wrap_wrap(self): - raises(TypeError, - self.space.wrap, - self.space.wrap(0)) - - def hopeful_test_exceptions(self): - self.apptest("self.failUnless(issubclass(ArithmeticError, Exception))") - self.apptest("self.failIf(issubclass(ArithmeticError, KeyError))") diff --git a/pypy/objspace/std/test/test_stringformat.py 
b/pypy/objspace/std/test/test_stringformat.py deleted file mode 100644 index cd9558a21b..0000000000 --- a/pypy/objspace/std/test/test_stringformat.py +++ /dev/null @@ -1,144 +0,0 @@ -import autopath - -objspacename = 'std' - -class AppTestStringObjectWithDict: - - def test_format_item(self): - d = {'i': 23} - assert 'a23b' == 'a%(i)sb' % d - assert '23b' == '%(i)sb' % d - assert 'a23' == 'a%(i)s' % d - assert '23' == '%(i)s' % d - - def test_format_two_items(self): - d = {'i': 23, 'j': 42} - assert 'a23b42c' == 'a%(i)sb%(j)sc' % d - assert 'a23b23c' == 'a%(i)sb%(i)sc' % d - - def test_format_percent(self): - assert 'a%b' == 'a%%b' % {} - - def test_format_empty_key(self): - d = {'':42} - assert '42' == '%()s' % d - - def test_format_wrong_char(self): - d = {'i': 23} - raises(ValueError, 'a%(i)Zb'.__mod__, d) - - def test_format_missing(self): - d = {'i': 23} - raises(KeyError, 'a%(x)sb'.__mod__, d) - -class AppTestStringObject: - - def test_format_item(self): - assert 'a23b' == 'a%sb' % 23 - assert '23b' == '%sb' % 23 - assert 'a23' == 'a%s' % 23 - assert '23' == '%s' % 23 - - def test_format_percent(self): - assert 'a%b' == 'a%%b' % () - assert '%b' == '%%b' % () - assert 'a%' == 'a%%' % () - assert '%' == '%%' % () - - def test_format_too_much(self): - raises(TypeError, '%s%s'.__mod__, ()) - raises(TypeError, '%s%s'.__mod__, (23,)) - - def test_format_not_enough(self): - raises(TypeError, '%s%s'.__mod__, (23,)*3) - raises(TypeError, '%s%s'.__mod__, (23,)*4) - - def test_format_string(self): - assert '23' == '%s' % '23' - assert "'23'" == '%r' % '23' - raises(TypeError, '%d'.__mod__, "23") - - def test_format_float(self): - assert '23' == '%d' % 23.456 - assert '17' == '%x' % 23.456 - assert '23.456' == '%s' % 23.456 - # for 'r' use a float that has an exact decimal rep: - assert '23.125' == '%r' % 23.125 - - def test_format_int(self): - assert '23' == '%d' % 23 - assert '17' == '%x' % 23 - assert '0x17' == '%#x' % 23 - assert '23' == '%s' % 23 - assert '23' == 
'%r' % 23 - - def test_format_list(self): - assert '<[1, 2]>' == '<%s>' % [1,2] - assert '<[1, 2]-[3, 4]>' == '<%s-%s>' % ([1,2], [3,4]) - - def test_format_tuple(self): - assert '<(1, 2)>' == '<%s>' % ((1,2),) - assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % ((1,2), (3,4)) - - def test_format_dict(self): - - # I'll just note that the first of these two completely - # contradicts what CPython's documentation says: - - # When the right argument is a dictionary (or other - # mapping type), then the formats in the string - # \emph{must} include a parenthesised mapping key into - # that dictionary inserted immediately after the - # \character{\%} character. - - # It is what CPython *does*, however. All software sucks. - - assert '<{1: 2}>' == '<%s>' % {1:2} - assert '<{1: 2}-{3: 4}>' == '<%s-%s>' % ({1:2}, {3:4}) - - def test_format_wrong_char(self): - raises(ValueError, 'a%Zb'.__mod__, ((23,),)) - - def test_incomplete_format(self): - raises(ValueError, '%'.__mod__, ((23,),)) - -class AppTestWidthPrec: - def test_width(self): - assert "%3s" %'a' == ' a' - assert "%-3s"%'a' == 'a ' - - def test_prec_string(self): - assert "%.3s"%'a' == 'a' - assert "%.3s"%'abcde' == 'abc' - - def test_prec_width_string(self): - assert "%5.3s" %'a' == ' a' - assert "%5.3s" %'abcde' == ' abc' - assert "%-5.3s"%'a' == 'a ' - assert "%-5.3s"%'abcde' == 'abc ' - - def test_zero_pad(self): - assert "%02d"%1 == "01" - assert "%05d"%1 == "00001" - assert "%-05d"%1 == "1 " - assert "%04f"%2.25 == "2.250000" - assert "%05g"%2.25 == "02.25" - assert "%-05g"%2.25 =="2.25 " - assert "%05s"%2.25 == " 2.25" - - - def test_star_width(self): - assert "%*s" %( 5, 'abc') == ' abc' - assert "%*s" %(-5, 'abc') == 'abc ' - assert "%-*s"%( 5, 'abc') == 'abc ' - assert "%-*s"%(-5, 'abc') == 'abc ' - - def test_star_prec(self): - assert "%.*s"%( 3, 'abc') == 'abc' - assert "%.*s"%( 3, 'abcde') == 'abc' - assert "%.*s"%(-3, 'abc') == '' - - def test_star_width_prec(self): - assert "%*.*s"%( 5, 3, 'abc') == ' abc' - 
assert "%*.*s"%( 5, 3, 'abcde') == ' abc' - assert "%*.*s"%(-5, 3, 'abcde') == 'abc ' diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py deleted file mode 100644 index 643a63912f..0000000000 --- a/pypy/objspace/std/test/test_stringobject.py +++ /dev/null @@ -1,479 +0,0 @@ -import autopath -from pypy.objspace.std import stringobject -from pypy.objspace.std.stringobject import W_StringObject - - -objspacename = 'std' - -class TestW_StringObject: - - def teardown_method(self,method): - pass - -## def test_order_rich(self): -## space = self.space -## def w(txt): -## return W_StringObject(space, txt) -## strs = ['ala', 'bla', 'ala', 'alaaa', '', 'b'] -## ops = [ 'EQ', 'LT', 'GT', 'NE', 'LE', 'GE' ] - -## while strs[1:]: -## str1 = strs.pop() -## for op in ops: -## #original python function -## orf = getattr(str1, '__%s__' % op.lower()) -## pypyconst = getattr(stringobject, op) -## for str2 in strs: -## if orf(str2): -## self.failUnless_w( -## string_richcompare(space, -## w(str1), -## w(str2), -## pypyconst)) -## else: -## self.failIf_w( -## string_richcompare(space, -## w(str1), -## w(str2), -## pypyconst)) - - def test_equality(self): - w = self.space.wrap - assert self.space.eq_w(w('abc'), w('abc')) - assert not self.space.eq_w(w('abc'), w('def')) - - def test_order_cmp(self): - space = self.space - w = space.wrap - assert self.space.is_true(space.lt(w('a'), w('b'))) - assert self.space.is_true(space.lt(w('a'), w('ab'))) - assert self.space.is_true(space.le(w('a'), w('a'))) - assert self.space.is_true(space.gt(w('a'), w(''))) - - def test_truth(self): - w = self.space.wrap - assert self.space.is_true(w('non-empty')) - assert not self.space.is_true(w('')) - - def test_getitem(self): - space = self.space - w = space.wrap - w_str = w('abc') - assert self.space.eq_w(space.getitem(w_str, w(0)), w('a')) - assert self.space.eq_w(space.getitem(w_str, w(-1)), w('c')) - self.space.raises_w(space.w_IndexError, - space.getitem, 
- w_str, - w(3)) - - def test_slice(self): - space = self.space - w = space.wrap - w_str = w('abc') - - w_slice = space.newslice(w(0), w(0), None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('')) - - w_slice = space.newslice(w(0), w(1), None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('a')) - - w_slice = space.newslice(w(0), w(10), None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('abc')) - - w_slice = space.newslice(space.w_None, space.w_None, None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('abc')) - - w_slice = space.newslice(space.w_None, w(-1), None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('ab')) - - w_slice = space.newslice(w(-1), space.w_None, None) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('c')) - - def test_extended_slice(self): - space = self.space - if self.space.__class__.__name__.startswith('Trivial'): - import sys - if sys.version < (2, 3): - return - w_None = space.w_None - w = space.wrap - w_str = w('hello') - - w_slice = space.newslice(w_None, w_None, w(1)) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('hello')) - - w_slice = space.newslice(w_None, w_None, w(-1)) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('olleh')) - - w_slice = space.newslice(w_None, w_None, w(2)) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('hlo')) - - w_slice = space.newslice(w(1), w_None, w(2)) - assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) - -class AppTestStringObject: - def test_format_wrongchar(self): - raises(ValueError, 'a%Zb'.__mod__, ((23,),)) - - def test_split(self): - assert "".split() == [] - assert "a".split() == ['a'] - assert " a ".split() == ['a'] - assert "a b c".split() == ['a','b','c'] - assert 'this is the split function'.split() == ['this', 'is', 'the', 'split', 'function'] - assert 'a|b|c|d'.split('|') == ['a', 'b', 'c', 'd'] - assert 'a|b|c|d'.split('|', 2) == ['a', 'b', 'c|d'] - assert 'a b c d'.split(None, 
1) == ['a', 'b c d'] - assert 'a b c d'.split(None, 2) == ['a', 'b', 'c d'] - assert 'a b c d'.split(None, 3) == ['a', 'b', 'c', 'd'] - assert 'a b c d'.split(None, 4) == ['a', 'b', 'c', 'd'] - assert 'a b c d'.split(None, 0) == ['a b c d'] - assert 'a b c d'.split(None, 2) == ['a', 'b', 'c d'] - assert 'a b c d '.split() == ['a', 'b', 'c', 'd'] - assert 'a//b//c//d'.split('//') == ['a', 'b', 'c', 'd'] - assert 'endcase test'.split('test') == ['endcase ', ''] - - def test_split_splitchar(self): - assert "/a/b/c".split('/') == ['','a','b','c'] - - def test_title(self): - assert "brown fox".title() == "Brown Fox" - assert "!brown fox".title() == "!Brown Fox" - assert "bROWN fOX".title() == "Brown Fox" - assert "Brown Fox".title() == "Brown Fox" - assert "bro!wn fox".title() == "Bro!Wn Fox" - - def test_istitle(self): - assert "brown fox".istitle() == False - assert "!brown fox".istitle() == False - assert "bROWN fOX".istitle() == False - assert "Brown Fox".istitle() == True - assert "bro!wn fox".istitle() == False - assert "Bro!wn fox".istitle() == False - assert "!brown Fox".istitle() == False - assert "!Brown Fox".istitle() == True - assert "Brow&&&&N Fox".istitle() == True - assert "!Brow&&&&n Fox".istitle() == False - - def test_capitalize(self): - assert "brown fox".capitalize() == "Brown fox" - assert ' hello '.capitalize() == ' hello ' - assert 'Hello '.capitalize() == 'Hello ' - assert 'hello '.capitalize() == 'Hello ' - assert 'aaaa'.capitalize() == 'Aaaa' - assert 'AaAa'.capitalize() == 'Aaaa' - - def test_rjust(self): - s = "abc" - assert s.rjust(2) == s - assert s.rjust(3) == s - assert s.rjust(4) == " " + s - assert s.rjust(5) == " " + s - assert 'abc'.rjust(10) == ' abc' - assert 'abc'.rjust(6) == ' abc' - assert 'abc'.rjust(3) == 'abc' - assert 'abc'.rjust(2) == 'abc' - - def test_ljust(self): - s = "abc" - assert s.ljust(2) == s - assert s.ljust(3) == s - assert s.ljust(4) == s + " " - assert s.ljust(5) == s + " " - assert 'abc'.ljust(10) == 'abc ' - 
assert 'abc'.ljust(6) == 'abc ' - assert 'abc'.ljust(3) == 'abc' - assert 'abc'.ljust(2) == 'abc' - - def test_replace(self): - assert 'one!two!three!'.replace('!', '@', 1) == 'one@two!three!' - assert 'one!two!three!'.replace('!', '') == 'onetwothree' - assert 'one!two!three!'.replace('!', '@', 2) == 'one@two@three!' - assert 'one!two!three!'.replace('!', '@', 3) == 'one@two@three@' - assert 'one!two!three!'.replace('!', '@', 4) == 'one@two@three@' - assert 'one!two!three!'.replace('!', '@', 0) == 'one!two!three!' - assert 'one!two!three!'.replace('!', '@') == 'one@two@three@' - assert 'one!two!three!'.replace('x', '@') == 'one!two!three!' - assert 'one!two!three!'.replace('x', '@', 2) == 'one!two!three!' - assert 'abc'.replace('', '-') == '-a-b-c-' - assert 'abc'.replace('', '-', 3) == '-a-b-c' - assert 'abc'.replace('', '-', 0) == 'abc' - assert ''.replace('', '') == '' - assert 'abc'.replace('ab', '--', 0) == 'abc' - assert 'abc'.replace('xy', '--') == 'abc' - assert '123'.replace('123', '') == '' - assert '123123'.replace('123', '') == '' - assert '123x123'.replace('123', '') == 'x' - - - def test_strip(self): - s = " a b " - assert s.strip() == "a b" - assert s.rstrip() == " a b" - assert s.lstrip() == "a b " - assert 'xyzzyhelloxyzzy'.strip('xyz') == 'hello' - assert 'xyzzyhelloxyzzy'.lstrip('xyz') == 'helloxyzzy' - assert 'xyzzyhelloxyzzy'.rstrip('xyz') == 'xyzzyhello' - - def test_zfill(self): - assert '123'.zfill(2) == '123' - assert '123'.zfill(3) == '123' - assert '123'.zfill(4) == '0123' - assert '+123'.zfill(3) == '+123' - assert '+123'.zfill(4) == '+123' - assert '+123'.zfill(5) == '+0123' - assert '-123'.zfill(3) == '-123' - assert '-123'.zfill(4) == '-123' - assert '-123'.zfill(5) == '-0123' - assert ''.zfill(3) == '000' - assert '34'.zfill(1) == '34' - assert '34'.zfill(4) == '0034' - - def test_center(self): - s="a b" - assert s.center(0) == "a b" - assert s.center(1) == "a b" - assert s.center(2) == "a b" - assert s.center(3) == "a b" - assert 
s.center(4) == "a b " - assert s.center(5) == " a b " - assert s.center(6) == " a b " - assert s.center(7) == " a b " - assert s.center(8) == " a b " - assert s.center(9) == " a b " - assert 'abc'.center(10) == ' abc ' - assert 'abc'.center(6) == ' abc ' - assert 'abc'.center(3) == 'abc' - assert 'abc'.center(2) == 'abc' - - - def test_count(self): - assert "".count("x") ==0 - assert "".count("") ==1 - assert "Python".count("") ==7 - assert "ab aaba".count("ab") ==2 - assert 'aaa'.count('a') == 3 - assert 'aaa'.count('b') == 0 - assert 'aaa'.count('a', -1) == 1 - assert 'aaa'.count('a', -10) == 3 - assert 'aaa'.count('a', 0, -1) == 2 - assert 'aaa'.count('a', 0, -10) == 0 - - - def test_startswith(self): - assert 'ab'.startswith('ab') == 1 - assert 'ab'.startswith('a') == 1 - assert 'ab'.startswith('') == 1 - assert 'x'.startswith('a') == 0 - assert 'x'.startswith('x') == 1 - assert ''.startswith('') == 1 - assert ''.startswith('a') == 0 - assert 'x'.startswith('xx') == 0 - assert 'y'.startswith('xx') == 0 - - - def test_endswith(self): - assert 'ab'.endswith('ab') == 1 - assert 'ab'.endswith('b') == 1 - assert 'ab'.endswith('') == 1 - assert 'x'.endswith('a') == 0 - assert 'x'.endswith('x') == 1 - assert ''.endswith('') == 1 - assert ''.endswith('a') == 0 - assert 'x'.endswith('xx') == 0 - assert 'y'.endswith('xx') == 0 - - def test_expandtabs(self): - assert 'abc\rab\tdef\ng\thi'.expandtabs() == 'abc\rab def\ng hi' - assert 'abc\rab\tdef\ng\thi'.expandtabs(8) == 'abc\rab def\ng hi' - assert 'abc\rab\tdef\ng\thi'.expandtabs(4) == 'abc\rab def\ng hi' - assert 'abc\r\nab\tdef\ng\thi'.expandtabs(4) == 'abc\r\nab def\ng hi' - assert 'abc\rab\tdef\ng\thi'.expandtabs() == 'abc\rab def\ng hi' - assert 'abc\rab\tdef\ng\thi'.expandtabs(8) == 'abc\rab def\ng hi' - assert 'abc\r\nab\r\ndef\ng\r\nhi'.expandtabs(4) == 'abc\r\nab\r\ndef\ng\r\nhi' - - s = 'xy\t' - assert s.expandtabs() =='xy ' - - s = '\txy\t' - assert s.expandtabs() ==' xy ' - assert s.expandtabs(1) ==' xy ' - 
assert s.expandtabs(2) ==' xy ' - assert s.expandtabs(3) ==' xy ' - - assert 'xy'.expandtabs() =='xy' - assert ''.expandtabs() =='' - - - def test_splitlines(self): - s="ab\nab\n \n x\n\n\n" - assert s.splitlines() ==['ab', 'ab', ' ', ' x', '', ''] - assert s.splitlines() ==s.splitlines(0) - assert s.splitlines(1) ==['ab\n', 'ab\n', ' \n', ' x\n', '\n', '\n'] - s="\none\n\two\nthree\n\n" - assert s.splitlines() ==['', 'one', '\two', 'three', ''] - assert s.splitlines(1) ==['\n', 'one\n', '\two\n', 'three\n', '\n'] - - def test_find(self): - assert 'abcdefghiabc'.find('abc') == 0 - assert 'abcdefghiabc'.find('abc', 1) == 9 - assert 'abcdefghiabc'.find('def', 4) == -1 - - def test_index(self): - assert 'abcdefghiabc'.index('') == 0 - assert 'abcdefghiabc'.index('def') == 3 - assert 'abcdefghiabc'.index('abc') == 0 - assert 'abcdefghiabc'.index('abc', 1) == 9 - #XXX it comes UnicodeError - #self.assertRaises(ValueError, 'abcdefghiabc'.index('hib')) - #self.assertRaises(ValueError, 'abcdefghiab'.index('abc', 1)) - #self.assertRaises(ValueError, 'abcdefghi'.index('ghi', 8)) - #self.assertRaises(ValueError, 'abcdefghi'.index('ghi', -1)) - - def test_rfind(self): - assert 'abcdefghiabc'.rfind('abc') == 9 - assert 'abcdefghiabc'.rfind('') == 12 - assert 'abcdefghiabc'.rfind('abcd') == 0 - assert 'abcdefghiabc'.rfind('abcz') == -1 - - def test_rindex(self): - assert 'abcdefghiabc'.rindex('') == 12 - assert 'abcdefghiabc'.rindex('def') == 3 - assert 'abcdefghiabc'.rindex('abc') == 9 - assert 'abcdefghiabc'.rindex('abc', 0, -1) == 0 - #XXX it comes UnicodeError - #self.assertRaises(ValueError, 'abcdefghiabc'.rindex('hib')) - #self.assertRaises(ValueError, 'defghiabc'.rindex('def', 1)) - #self.assertRaises(ValueError, 'defghiabc'.rindex('abc', 0, -1)) - #self.assertRaises(ValueError, 'abcdefghi'.rindex('ghi', 0, 8)) - #self.assertRaises(ValueError, 'abcdefghi'.rindex('ghi', 0, -1)) - - - def test_split_maxsplit(self): - assert "/a/b/c".split('/', 2) == ['','a','b/c'] - assert 
"a/b/c".split("/") == ['a', 'b', 'c'] - assert " a ".split(None, 0) == ['a '] - assert " a ".split(None, 1) == ['a'] - assert " a a ".split(" ", 0) == [' a a '] - assert " a a ".split(" ", 1) == ['', 'a a '] - - def test_join(self): - assert ", ".join(['a', 'b', 'c']) == "a, b, c" - assert "".join([]) == "" - assert "-".join(['a', 'b']) == 'a-b' - raises(TypeError, ''.join, 1) - raises(TypeError, ''.join, [1]) - raises(TypeError, ''.join, [[1]]) - - def test_lower(self): - assert "aaa AAA".lower() == "aaa aaa" - assert "".lower() == "" - - def test_upper(self): - assert "aaa AAA".upper() == "AAA AAA" - assert "".upper() == "" - - def test_isalnum(self): - assert "".isalnum() == False - assert "!Bro12345w&&&&n Fox".isalnum() == False - assert "125 Brown Foxes".isalnum() == False - assert "125BrownFoxes".isalnum() == True - - def test_isalpha(self): - assert "".isalpha() == False - assert "!Bro12345w&&&&nFox".isalpha() == False - assert "Brown Foxes".isalpha() == False - assert "125".isalpha() == False - - def test_isdigit(self): - assert "".isdigit() == False - assert "!Bro12345w&&&&nFox".isdigit() == False - assert "Brown Foxes".isdigit() == False - assert "125".isdigit() == True - - def test_isspace(self): - assert "".isspace() == False - assert "!Bro12345w&&&&nFox".isspace() == False - assert " ".isspace() == True - assert "\t\t\b\b\n".isspace() == False - assert "\t\t".isspace() == True - assert "\t\t\r\r\n".isspace() == True - - def test_islower(self): - assert "".islower() == False - assert " ".islower() == False - assert "\t\t\b\b\n".islower() == False - assert "b".islower() == True - assert "bbb".islower() == True - assert "!bbb".islower() == False - assert "BBB".islower() == False - assert "bbbBBB".islower() == False - - def test_isupper(self): - assert "".isupper() == False - assert " ".isupper() == False - assert "\t\t\b\b\n".isupper() == False - assert "B".isupper() == True - assert "BBB".isupper() == True - assert "!BBB".isupper() == False - assert 
"bbb".isupper() == False - assert "BBBbbb".isupper() == False - - - def test_swapcase(self): - assert "aaa AAA 111".swapcase() == "AAA aaa 111" - assert "".swapcase() == "" - - def test_translate(self): - def maketrans(origin, image): - if len(origin) != len(image): - raise ValueError("maketrans arguments must have same length") - L = [chr(i) for i in range(256)] - for i in range(len(origin)): - L[ord(origin[i])] = image[i] - - tbl = ''.join(L) - return tbl - - table = maketrans('abc', 'xyz') - assert 'xyzxyz' == 'xyzabcdef'.translate(table, 'def') - - table = maketrans('a', 'A') - assert 'Abc' == 'abc'.translate(table) - assert 'xyz' == 'xyz'.translate(table) - assert 'yz' == 'xyz'.translate(table, 'x') - - #self.assertRaises(ValueError, 'xyz'.translate('too short', 'strip')) - #self.assertRaises(ValueError, 'xyz'.translate('too short')) - - def test_iter(self): - l=[] - for i in iter("42"): - l.append(i) - assert l == ['4','2'] - - def test_repr(self): - assert repr("") =="''" - assert repr("a") =="'a'" - assert repr("'") =='"\'"' - assert repr("\'") =="\"\'\"" - assert repr("\"") =='\'"\'' - assert repr("\t") =="'\\t'" - assert repr("\\") =="'\\\\'" - assert repr('') =="''" - assert repr('a') =="'a'" - assert repr('"') =="'\"'" - assert repr('\'') =='"\'"' - assert repr('\"') =="'\"'" - assert repr('\t') =="'\\t'" - assert repr('\\') =="'\\\\'" - assert repr("'''\"") =='\'\\\'\\\'\\\'"\'' - assert repr(chr(19)) =="'\\x13'" - assert repr(chr(2)) =="'\\x02'" - - def test_contains(self): - assert '' in 'abc' - assert 'a' in 'abc' - assert 'ab' in 'abc' - assert not 'd' in 'abc' - raises(TypeError, 'a'.__contains__, 1) diff --git a/pypy/objspace/std/test/test_strutil.py b/pypy/objspace/std/test/test_strutil.py deleted file mode 100644 index bfec44cdc8..0000000000 --- a/pypy/objspace/std/test/test_strutil.py +++ /dev/null @@ -1,115 +0,0 @@ -import autopath -from pypy.objspace.std.strutil import * - -objspacename = 'std' - -class TestStrUtil: - - def 
test_string_to_int(self): - cases = [('0', 0), - ('1', 1), - ('9', 9), - ('10', 10), - ('09', 9), - ('0000101', 101), # not octal unless base 0 or 8 - ('5123', 5123), - ('1891234174197319', 1891234174197319), - (' 0', 0), - ('0 ', 0), - (' \t \n 32313 \f \v \r \n\r ', 32313), - ('+12', 12), - ('-5', -5), - (' -123456789 ', -123456789), - ] - for s, expected in cases: - assert string_to_int(s) == expected - assert string_to_long(s) == expected - - def test_string_to_int_base(self): - cases = [('111', 2, 7), - ('010', 2, 2), - ('102', 3, 11), - ('103', 4, 19), - ('107', 8, 71), - ('109', 10, 109), - ('10A', 11, 131), - ('10a', 11, 131), - ('10f', 16, 271), - ('10F', 16, 271), - ('0x10f', 16, 271), - ('0x10F', 16, 271), - ('10z', 36, 1331), - ('10Z', 36, 1331), - ('12', 0, 12), - ('015', 0, 13), - ('0x10', 0, 16), - ('0XE', 0, 14), - ('0', 0, 0), - ('0x', 0, 0), # according to CPython so far - ('0X', 0, 0), # " " - ('0x', 16, 0), # " " - ('0X', 16, 0), # " " - ] - for s, base, expected in cases: - assert string_to_int(s, base) == expected - assert string_to_int('+'+s, base) == expected - assert string_to_int('-'+s, base) == -expected - assert string_to_int(s+'\n', base) == expected - assert string_to_int(' +'+s, base) == expected - assert string_to_int('-'+s+' ', base) == -expected - - def test_string_to_int_error(self): - cases = ['0x123', # must use base 0 or 16 - ' 0X12 ', - '', - '++12', - '+-12', - '-+12', - '--12', - '- 5', - '+ 5', - '12a6', - '12A6', - 'f', - 'Z', - '.', - '@', - ] - for s in cases: - raises(ValueError, string_to_int, s) - raises(ValueError, string_to_int, ' '+s) - raises(ValueError, string_to_int, s+' ') - raises(ValueError, string_to_int, '+'+s) - raises(ValueError, string_to_int, '-'+s) - - def test_string_to_int_base_error(self): - cases = [('1', 1), - ('1', 37), - ('a', 0), - ('9', 9), - ('0x123', 7), - ('145cdf', 15), - ('12', 37), - ('12', 98172), - ('12', -1), - ('12', -908), - ('12.3', 10), - ('12.3', 13), - ('12.3', 16), - ] - for s, 
base in cases: - raises(ValueError, string_to_int, s, base) - raises(ValueError, string_to_int, ' '+s, base) - raises(ValueError, string_to_int, s+' ', base) - raises(ValueError, string_to_int, '+'+s, base) - raises(ValueError, string_to_int, '-'+s, base) - - def test_string_to_long(self): - assert string_to_long('123L') == 123 - assert string_to_long('123L ') == 123 - raises(ValueError, string_to_long, 'L') - raises(ValueError, string_to_long, 'L ') - assert string_to_long('123L', 4) == 27 - assert string_to_long('123L', 30) == 27000 + 1800 + 90 + 21 - assert string_to_long('123L', 22) == 10648 + 968 + 66 + 21 - assert string_to_long('123L', 21) == 441 + 42 + 3 diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py deleted file mode 100644 index c4f64ff2cc..0000000000 --- a/pypy/objspace/std/test/test_tupleobject.py +++ /dev/null @@ -1,235 +0,0 @@ -#from __future__ import nested_scopes -import autopath -from pypy.objspace.std.tupleobject import W_TupleObject -from pypy.interpreter.error import OperationError - - -objspacename = 'std' - -class TestW_TupleObject: - - def test_is_true(self): - w = self.space.wrap - w_tuple = W_TupleObject(self.space, []) - assert self.space.is_true(w_tuple) == False - w_tuple = W_TupleObject(self.space, [w(5)]) - assert self.space.is_true(w_tuple) == True - w_tuple = W_TupleObject(self.space, [w(5), w(3)]) - assert self.space.is_true(w_tuple) == True - - def test_len(self): - w = self.space.wrap - w_tuple = W_TupleObject(self.space, []) - assert self.space.eq_w(self.space.len(w_tuple), w(0)) - w_tuple = W_TupleObject(self.space, [w(5)]) - assert self.space.eq_w(self.space.len(w_tuple), w(1)) - w_tuple = W_TupleObject(self.space, [w(5), w(3), w(99)]*111) - assert self.space.eq_w(self.space.len(w_tuple), w(333)) - - def test_getitem(self): - w = self.space.wrap - w_tuple = W_TupleObject(self.space, [w(5), w(3)]) - assert self.space.eq_w(self.space.getitem(w_tuple, w(0)), w(5)) - assert 
self.space.eq_w(self.space.getitem(w_tuple, w(1)), w(3)) - assert self.space.eq_w(self.space.getitem(w_tuple, w(-2)), w(5)) - assert self.space.eq_w(self.space.getitem(w_tuple, w(-1)), w(3)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_tuple, w(2)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_tuple, w(42)) - self.space.raises_w(self.space.w_IndexError, - self.space.getitem, w_tuple, w(-3)) - - def test_iter(self): - w = self.space.wrap - w_tuple = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_iter = self.space.iter(w_tuple) - assert self.space.eq_w(self.space.next(w_iter), w(5)) - assert self.space.eq_w(self.space.next(w_iter), w(3)) - assert self.space.eq_w(self.space.next(w_iter), w(99)) - raises(OperationError, self.space.next, w_iter) - raises(OperationError, self.space.next, w_iter) - - def test_contains(self): - w = self.space.wrap - w_tuple = W_TupleObject(self.space, [w(5), w(3), w(99)]) - assert self.space.eq_w(self.space.contains(w_tuple, w(5)), - self.space.w_True) - assert self.space.eq_w(self.space.contains(w_tuple, w(99)), - self.space.w_True) - assert self.space.eq_w(self.space.contains(w_tuple, w(11)), - self.space.w_False) - assert self.space.eq_w(self.space.contains(w_tuple, w_tuple), - self.space.w_False) - - def test_add(self): - w = self.space.wrap - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(-7)] * 111) - assert self.space.eq_w(self.space.add(w_tuple1, w_tuple1), - W_TupleObject(self.space, [w(5), w(3), w(99), - w(5), w(3), w(99)])) - assert self.space.eq_w(self.space.add(w_tuple1, w_tuple2), - W_TupleObject(self.space, - [w(5), w(3), w(99)] + [w(-7)] * 111)) - assert self.space.eq_w(self.space.add(w_tuple1, w_tuple0), w_tuple1) - assert self.space.eq_w(self.space.add(w_tuple0, w_tuple2), w_tuple2) - - def test_mul(self): - # only testing right mul at the moment - w = self.space.wrap - arg 
= w(2) - n = 3 - w_tup = W_TupleObject(self.space, [arg]) - w_tup3 = W_TupleObject(self.space, [arg]*n) - w_res = self.space.mul(w_tup, w(n)) - assert self.space.eq_w(w_tup3, w_res) - # commute - w_res = self.space.mul(w(n), w_tup) - assert self.space.eq_w(w_tup3, w_res) - - def test_getslice(self): - w = self.space.wrap - - def test1(testtuple, start, stop, step, expected): - w_slice = self.space.newslice(w(start), w(stop), w(step)) - w_tuple = W_TupleObject(self.space, [w(i) for i in testtuple]) - w_result = self.space.getitem(w_tuple, w_slice) - assert self.space.unwrap(w_result) == expected - - for testtuple in [(), (5,3,99), tuple(range(5,555,10))]: - for start in [-2, -1, 0, 1, 10]: - for end in [-1, 0, 2, 999]: - test1(testtuple, start, end, 1, testtuple[start:end]) - - test1((5,7,1,4), 3, 1, -2, (4,)) - test1((5,7,1,4), 3, 0, -2, (4, 7)) - test1((5,7,1,4), 3, -1, -2, ()) - test1((5,7,1,4), -2, 11, 2, (1,)) - test1((5,7,1,4), -3, 11, 2, (7, 4)) - test1((5,7,1,4), -5, 11, 2, (5, 1)) - - def test_eq(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - - assert self.space.eq_w(self.space.eq(w_tuple0, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.eq(w_tuple1, w_tuple0), - self.space.w_False) - assert self.space.eq_w(self.space.eq(w_tuple1, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.eq(w_tuple1, w_tuple2), - self.space.w_True) - assert self.space.eq_w(self.space.eq(w_tuple2, w_tuple3), - self.space.w_False) - def test_ne(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - - assert 
self.space.eq_w(self.space.ne(w_tuple0, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.ne(w_tuple1, w_tuple0), - self.space.w_True) - assert self.space.eq_w(self.space.ne(w_tuple1, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.ne(w_tuple1, w_tuple2), - self.space.w_False) - assert self.space.eq_w(self.space.ne(w_tuple2, w_tuple3), - self.space.w_True) - def test_lt(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_tuple4 = W_TupleObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.lt(w_tuple0, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.lt(w_tuple1, w_tuple0), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_tuple1, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_tuple1, w_tuple2), - self.space.w_False) - assert self.space.eq_w(self.space.lt(w_tuple2, w_tuple3), - self.space.w_True) - assert self.space.eq_w(self.space.lt(w_tuple4, w_tuple3), - self.space.w_True) - - def test_ge(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_tuple4 = W_TupleObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.ge(w_tuple0, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.ge(w_tuple1, w_tuple0), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_tuple1, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_tuple1, w_tuple2), - self.space.w_True) - assert self.space.eq_w(self.space.ge(w_tuple2, w_tuple3), - self.space.w_False) - assert 
self.space.eq_w(self.space.ge(w_tuple4, w_tuple3), - self.space.w_False) - - def test_gt(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_tuple4 = W_TupleObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.gt(w_tuple0, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_tuple1, w_tuple0), - self.space.w_True) - assert self.space.eq_w(self.space.gt(w_tuple1, w_tuple1), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_tuple1, w_tuple2), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_tuple2, w_tuple3), - self.space.w_False) - assert self.space.eq_w(self.space.gt(w_tuple4, w_tuple3), - self.space.w_False) - - def test_le(self): - w = self.space.wrap - - w_tuple0 = W_TupleObject(self.space, []) - w_tuple1 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple2 = W_TupleObject(self.space, [w(5), w(3), w(99)]) - w_tuple3 = W_TupleObject(self.space, [w(5), w(3), w(99), w(-1)]) - w_tuple4 = W_TupleObject(self.space, [w(5), w(3), w(9), w(-1)]) - - assert self.space.eq_w(self.space.le(w_tuple0, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_tuple1, w_tuple0), - self.space.w_False) - assert self.space.eq_w(self.space.le(w_tuple1, w_tuple1), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_tuple1, w_tuple2), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_tuple2, w_tuple3), - self.space.w_True) - assert self.space.eq_w(self.space.le(w_tuple4, w_tuple3), - self.space.w_True) diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py deleted file mode 100644 index 49a0f047f3..0000000000 --- a/pypy/objspace/std/test/test_typeobject.py +++ /dev/null @@ -1,193 +0,0 @@ -import autopath - 
-##class TestSpecialMultimethodCode(testit.TestCase): - -## def setUp(self): -## self.space = testit.objspace('std') - -## def tearDown(self): -## pass - -## def test_int_sub(self): -## w = self.space.wrap -## for i in range(2): -## meth = SpecialMultimethodCode(self.space.sub.multimethod, -## self.space.w_int.__class__, i) -## self.assertEqual(meth.slice().is_empty(), False) -## # test int.__sub__ and int.__rsub__ -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5, 'x2': 7})), -## w(-2)) -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5, 'x2': 7.1})), -## self.space.w_NotImplemented) -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5.5, 'x2': 7})), -## self.space.w_NotImplemented) - -## def test_empty_inplace_add(self): -## for i in range(2): -## meth = SpecialMultimethodCode(self.space.inplace_add.multimethod, -## self.space.w_int.__class__, i) -## self.assertEqual(meth.slice().is_empty(), True) - -## def test_float_sub(self): -## w = self.space.wrap -## w(1.5) # force floatobject imported -## for i in range(2): -## meth = SpecialMultimethodCode(self.space.sub.multimethod, -## self.space.w_float.__class__, i) -## self.assertEqual(meth.slice().is_empty(), False) -## # test float.__sub__ and float.__rsub__ - -## # some of these tests are pointless for Python because -## # float.__(r)sub__ should not accept an int as first argument -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5, 'x2': 7})), -## w(-2.0)) -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5, 'x2': 7.5})), -## w(-2.5)) -## self.assertEqual_w(meth.eval_code(self.space, None, -## w({'x1': 5.5, 'x2': 7})), -## w(-1.5)) - -objspacename = 'std' - -class AppTestTypeObject: - def test_bases(self): - assert int.__bases__ == (object,) - class X: pass - assert X.__bases__ == (object,) - class Y(X): pass - assert Y.__bases__ == (X,) - class Z(Y,X): pass - assert Z.__bases__ == (Y, X) - - def 
test_builtin_add(self): - x = 5 - assert x.__add__(6) == 11 - x = 3.5 - assert x.__add__(2) == 5.5 - assert x.__add__(2.0) == 5.5 - - def test_builtin_call(self): - def f(*args): - return args - assert f.__call__() == () - assert f.__call__(5) == (5,) - assert f.__call__("hello", "world") == ("hello", "world") - - def test_builtin_call_kwds(self): - def f(*args, **kwds): - return args, kwds - assert f.__call__() == ((), {}) - assert f.__call__("hello", "world") == (("hello", "world"), {}) - assert f.__call__(5, bla=6) == ((5,), {"bla": 6}) - assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, - "c": 3}) - - def test_multipleinheritance_fail(self): - try: - class A(int, dict): - pass - except TypeError: - pass - else: - raise AssertionError, "this multiple inheritance should fail" - - def test_outer_metaclass(self): - class OuterMetaClass(type): - pass - - class HasOuterMetaclass(object): - __metaclass__ = OuterMetaClass - - assert type(HasOuterMetaclass) == OuterMetaClass - assert type(HasOuterMetaclass) == HasOuterMetaclass.__metaclass__ - - def test_inner_metaclass(self): - class HasInnerMetaclass(object): - class __metaclass__(type): - pass - - assert type(HasInnerMetaclass) == HasInnerMetaclass.__metaclass__ - - def test_implicit_metaclass(self): - global __metaclass__ - try: - old_metaclass = __metaclass__ - has_old_metaclass = True - except NameError: - has_old_metaclass = False - - class __metaclass__(type): - pass - - class HasImplicitMetaclass: - pass - - try: - assert type(HasImplicitMetaclass) == __metaclass__ - finally: - if has_old_metaclass: - __metaclass__ = old_metaclass - else: - del __metaclass__ - - - def test_mro(self): - class A_mro(object): - a = 1 - - class B_mro(A_mro): - b = 1 - class __metaclass__(type): - def mro(self): - return [self, object] - - assert B_mro.__bases__ == (A_mro,) - assert B_mro.__mro__ == (B_mro, object) - assert B_mro.mro() == [B_mro, object] - assert B_mro.b == 1 - assert B_mro().b == 1 - assert getattr(B_mro, 
'a', None) == None - assert getattr(B_mro(), 'a', None) == None - - def test_nodoc(self): - class NoDoc(object): - pass - - try: - assert NoDoc.__doc__ == None - except AttributeError: - raise AssertionError, "__doc__ missing!" - - def test_explicitdoc(self): - class ExplicitDoc(object): - __doc__ = 'foo' - - assert ExplicitDoc.__doc__ == 'foo' - - def test_implicitdoc(self): - class ImplicitDoc(object): - "foo" - - assert ImplicitDoc.__doc__ == 'foo' - - def test_immutabledoc(self): - class ImmutableDoc(object): - "foo" - - try: - ImmutableDoc.__doc__ = "bar" - except TypeError: - pass - except AttributeError: - # XXX - Python raises TypeError for several descriptors, - # we always raise AttributeError. - pass - else: - raise AssertionError, '__doc__ should not be writable' - - assert ImmutableDoc.__doc__ == 'foo' diff --git a/pypy/objspace/std/test/test_unicodestring.py b/pypy/objspace/std/test/test_unicodestring.py deleted file mode 100644 index 3f1414d017..0000000000 --- a/pypy/objspace/std/test/test_unicodestring.py +++ /dev/null @@ -1,40 +0,0 @@ -# test the integration of unicode and strings (even though we don't -# really implement unicode yet). 
- -import autopath, sys - - -objspacename = 'std' - -class AppTestUnicodeStringStdOnly: - def test_compares(self): - assert u'a' == 'a' - assert 'a' == u'a' - assert not u'a' == 'b' # xxx u'a' != 'b' fails - assert not 'a' == u'b'# xxx 'a' != u'b' fails - -class AppTestUnicodeString: - def test_addition(self): - def check(a, b): - assert a == b - assert type(a) == type(b) - check(u'a' + 'b', u'ab') - check('a' + u'b', u'ab') - - def test_join(self): - def check(a, b): - assert a == b - assert type(a) == type(b) - check(', '.join([u'a']), u'a') - check(', '.join(['a', u'b']), u'a, b') - check(u', '.join(['a', 'b']), u'a, b') - - if sys.version_info >= (2,3): - def test_contains_ex(self): - assert u'' in 'abc' - assert u'bc' in 'abc' - assert 'bc' in 'abc' - - def test_contains(self): - assert u'a' in 'abc' - assert 'a' in u'abc' diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py deleted file mode 100644 index 1737f60c0f..0000000000 --- a/pypy/objspace/std/test/test_userobject.py +++ /dev/null @@ -1,143 +0,0 @@ -import autopath - -objspacename = 'std' - -class AppTestUserObject: - def test_emptyclass(self): - class empty: pass - inst = empty() - assert isinstance(inst, empty) - inst.attr=23 - assert inst.attr ==23 - - def test_method(self): - class A: - def f(self, v): - return v*42 - a = A() - assert a.f('?') == '??????????????????????????????????????????' - - def test_unboundmethod(self): - class A: - def f(self, v): - return v*17 - a = A() - assert A.f(a, '!') == '!!!!!!!!!!!!!!!!!' 
- - def test_subclassing(self): - for base in tuple, list, dict, str, int, float: - try: - class subclass(base): pass - stuff = subclass() - except: - print 'not subclassable:', base - if base is not dict: # XXX must be fixed - raise - else: - assert isinstance(stuff, base) - - def test_subclasstuple(self): - class subclass(tuple): pass - stuff = subclass() - assert isinstance(stuff, tuple) - stuff.attr = 23 - assert stuff.attr ==23 - assert len(stuff) ==0 - result = stuff + (1,2,3) - assert len(result) ==3 - - def test_subsubclass(self): - class base: - baseattr = 12 - class derived(base): - derivedattr = 34 - inst = derived() - assert isinstance(inst, base) - assert inst.baseattr ==12 - assert inst.derivedattr ==34 - - def test_descr_get(self): - class C: - class desc: - def __get__(self, ob, cls=None): - return 42 - prop = desc() - assert C().prop == 42 - - def test_descr_set(self): - class C: - class desc: - def __set__(self, ob, val): - ob.wibble = val - prop = desc() - c = C() - c.prop = 32 - assert c.wibble == 32 - - def test_descr_delete(self): - class C: - class desc: - def __set__(self, ob, val): - oogabooga - def __delete__(self, ob): - ob.wibble = 22 - prop = desc() - c = C() - del c.prop - assert c.wibble == 22 - - def test_class_setattr(self): - class C: - pass - C.a = 1 - assert hasattr(C, 'a') - assert C.a == 1 - - def test_add(self): - class C: - def __add__(self, other): - return self, other - c1 = C() - assert c1+3 == (c1, 3) - - def test_call(self): - class C: - def __call__(self, *args): - return args - c1 = C() - assert c1() == () - assert c1(5) == (5,) - assert c1("hello", "world") == ("hello", "world") - - def test_getattribute(self): - class C: - def __getattribute__(self, name): - return '->' + name - c1 = C() - assert c1.a == '->a' - c1.a = 5 - assert c1.a == '->a' - - def test_getattr(self): - class C: - def __getattr__(self, name): - return '->' + name - c1 = C() - assert c1.a == '->a' - c1.a = 5 - assert c1.a == 5 - - def 
test_dict(self): - class A(object): - pass - class B(A): - pass - assert not '__dict__' in object.__dict__ - assert '__dict__' in A.__dict__ - assert not '__dict__' in B.__dict__ - a = A() - a.x = 5 - assert a.__dict__ == {'x': 5} - a.__dict__ = {'y': 6} - assert a.y == 6 - assert not hasattr(a, 'x') diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py deleted file mode 100644 index c34dfe4d67..0000000000 --- a/pypy/objspace/std/tupleobject.py +++ /dev/null @@ -1,118 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std import slicetype - - -class W_TupleObject(W_Object): - from pypy.objspace.std.tupletype import tuple_typedef as typedef - - def __init__(w_self, space, wrappeditems): - W_Object.__init__(w_self, space) - w_self.wrappeditems = wrappeditems # a list of wrapped values - - def __repr__(w_self): - """ representation for debugging purposes """ - reprlist = [repr(w_item) for w_item in w_self.wrappeditems] - return "%s(%s)" % (w_self.__class__.__name__, ', '.join(reprlist)) - - -registerimplementation(W_TupleObject) - - -def unwrap__Tuple(space, w_tuple): - items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems] - return tuple(items) - -def len__Tuple(space, w_tuple): - result = len(w_tuple.wrappeditems) - return W_IntObject(space, result) - -def getitem__Tuple_Int(space, w_tuple, w_index): - items = w_tuple.wrappeditems - try: - w_item = items[w_index.intval] - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("tuple index out of range")) - return w_item - -def getitem__Tuple_Slice(space, w_tuple, w_slice): - items = w_tuple.wrappeditems - length = len(items) - start, stop, step, slicelength = slicetype.indices4(space, w_slice, length) - assert slicelength >= 0 - subitems = [None] * slicelength - for i in range(slicelength): - subitems[i] = items[start] - start += 
step - return W_TupleObject(space, subitems) - -def iter__Tuple(space, w_tuple): - from pypy.objspace.std import iterobject - return iterobject.W_SeqIterObject(space, w_tuple) - -def add__Tuple_Tuple(space, w_tuple1, w_tuple2): - items1 = w_tuple1.wrappeditems - items2 = w_tuple2.wrappeditems - return W_TupleObject(space, items1 + items2) - -def mul__Tuple_Int(space, w_tuple, w_int): - items = w_tuple.wrappeditems - times = w_int.intval - return W_TupleObject(space, items * times) - - -def mul__Int_Tuple(space, w_int, w_tuple): - return mul__Tuple_Int(space, w_tuple, w_int) - -def eq__Tuple_Tuple(space, w_tuple1, w_tuple2): - items1 = w_tuple1.wrappeditems - items2 = w_tuple2.wrappeditems - if len(items1) != len(items2): - return space.w_False - for item1, item2 in zip(items1, items2): - if not space.is_true(space.eq(item1, item2)): - return space.w_False - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__Tuple_Tuple(space, w_tuple1, w_tuple2): - items1 = w_tuple1.wrappeditems - items2 = w_tuple2.wrappeditems - ncmp = _min(len(items1), len(items2)) - # Search for the first index where items are different - for p in range(ncmp): - if not space.is_true(space.eq(items1[p], items2[p])): - return space.lt(items1[p], items2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(items1) < len(items2)) - -def gt__Tuple_Tuple(space, w_tuple1, w_tuple2): - items1 = w_tuple1.wrappeditems - items2 = w_tuple2.wrappeditems - ncmp = _min(len(items1), len(items2)) - # Search for the first index where items are different - for p in range(ncmp): - if not space.is_true(space.eq(items1[p], items2[p])): - return space.gt(items1[p], items2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(items1) > len(items2)) - -def app_repr__Tuple(t): - if len(t) == 1: - return "(" + repr(t[0]) + ",)" - else: - return "(" + ", ".join([repr(x) for x in t]) + ')' - -def hash__Tuple(space, w_tuple): - # silly-ish, but 
_correct_, while lacking it would be WRONG - return space.len(w_tuple) - -from pypy.interpreter import gateway -gateway.importall(globals()) -register_all(vars()) diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py deleted file mode 100644 index 88ae506632..0000000000 --- a/pypy/objspace/std/tupletype.py +++ /dev/null @@ -1,18 +0,0 @@ -from pypy.objspace.std.stdtypedef import * - - -def descr__new__(space, w_tupletype, w_items=None): - from pypy.objspace.std.tupleobject import W_TupleObject - if w_items is None: - tuple_w = [] - else: - tuple_w = space.unpackiterable(w_items) - w_obj = space.allocate_instance(W_TupleObject, w_tupletype) - w_obj.__init__(space, tuple_w) - return w_obj - -# ____________________________________________________________ - -tuple_typedef = StdTypeDef("tuple", - __new__ = newmethod(descr__new__), - ) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py deleted file mode 100644 index 31a99e6a0f..0000000000 --- a/pypy/objspace/std/typeobject.py +++ /dev/null @@ -1,218 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.interpreter.function import Function, StaticMethod -from pypy.interpreter.typedef import default_dict_descr -from pypy.interpreter.argument import Arguments -from pypy.objspace.std.stdtypedef import issubtypedef -from pypy.objspace.std.objecttype import object_typedef - -class W_TypeObject(W_Object): - from pypy.objspace.std.typetype import type_typedef as typedef - - def __init__(w_self, space, name, bases_w, dict_w, - overridetypedef=None, forcedict=True): - W_Object.__init__(w_self, space) - w_self.name = name - w_self.bases_w = bases_w - w_self.dict_w = dict_w - w_self.ensure_static__new__() - - w_self.mro_w = compute_C3_mro(w_self) - if overridetypedef is not None: - w_self.instancetypedef = overridetypedef - else: - # find the most specific typedef - instancetypedef = object_typedef - for w_base in bases_w: - if issubtypedef(w_base.instancetypedef, 
instancetypedef): - instancetypedef = w_base.instancetypedef - elif not issubtypedef(instancetypedef, w_base.instancetypedef): - raise OperationError(space.w_TypeError, - space.wrap("instance layout conflicts in " - "multiple inheritance")) - w_self.instancetypedef = instancetypedef - # XXX - this w_self.lookup('__dict__') is why we precalculate a C3 mro - # even for instances that may override the mro. This probably is not - # a good thing to do. - if forcedict and not w_self.lookup('__dict__'): - w_self.dict_w['__dict__'] = space.wrap(default_dict_descr) - if overridetypedef is None: - w_type = space.type(w_self) - if not space.is_true(space.is_(w_type, space.w_type)): - mro_func = w_type.lookup('mro') - mro_func_args = Arguments(space, [w_self]) - w_mro = space.call_args(mro_func, mro_func_args) - w_self.mro_w = space.unpackiterable(w_mro) - - - def ensure_static__new__(w_self): - # special-case __new__, as in CPython: - # if it is a Function, turn it into a static method - if '__new__' in w_self.dict_w: - w_new = w_self.dict_w['__new__'] - if isinstance(w_new, Function): - w_self.dict_w['__new__'] = StaticMethod(w_new) - - def lookup(w_self, key): - # note that this doesn't call __get__ on the result at all - space = w_self.space - for w_class in w_self.mro_w: - try: - return w_class.dict_w[key] - except KeyError: - pass - return None - - def lookup_where(w_self, key): - # like lookup() but also returns the parent class in which the - # attribute was found - space = w_self.space - for w_class in w_self.mro_w: - try: - return w_class, w_class.dict_w[key] - except KeyError: - pass - return None, None - - def check_user_subclass(w_self, w_subtype): - space = w_self.space - if not space.is_true(space.isinstance(w_subtype, space.w_type)): - raise OperationError(space.w_TypeError, - space.wrap("X is not a type object (%s)" % ( - space.type(w_subtype).name))) - if not space.is_true(space.issubtype(w_subtype, w_self)): - raise OperationError(space.w_TypeError, - 
space.wrap("%s.__new__(%s): %s is not a subtype of %s" % ( - w_self.name, w_subtype.name, w_subtype.name, w_self.name))) - if w_self.instancetypedef is not w_subtype.instancetypedef: - raise OperationError(space.w_TypeError, - space.wrap("%s.__new__(%s) is not safe, use %s.__new__()" % ( - w_self.name, w_subtype.name, w_subtype.name))) - - def getdict(w_self): - # XXX should return a <dictproxy object> - space = w_self.space - dictspec = [] - for key, w_value in w_self.dict_w.items(): - dictspec.append((space.wrap(key), w_value)) - return space.newdict(dictspec) - - def setdict(w_self, w_dict): - space = w_self.space - raise OperationError(space.w_TypeError, - space.wrap("attribute '__dict__' of type objects " - "is not writable")) - - -def call__Type(space, w_type, w_args, w_kwds): - args = Arguments.frompacked(space, w_args, w_kwds) - # special case for type(x) - if space.is_true(space.is_(w_type, space.w_type)): - try: - w_obj, = args.fixedunpack(1) - except ValueError: - pass - else: - return space.type(w_obj) - # invoke the __new__ of the type - w_newfunc = space.getattr(w_type, space.wrap('__new__')) - w_newobject = space.call_args(w_newfunc, args.prepend(w_type)) - # maybe invoke the __init__ of the type - if space.is_true(space.isinstance(w_newobject, w_type)): - w_descr = space.lookup(w_newobject, '__init__') - space.get_and_call_args(w_descr, w_newobject, args) - return w_newobject - -def issubtype__Type_Type(space, w_type1, w_type2): - return space.newbool(w_type2 in w_type1.mro_w) - -def repr__Type(space, w_obj): - return space.wrap("<pypy type '%s'>" % w_obj.name) # XXX remove 'pypy' - -def getattr__Type_ANY(space, w_type, w_name): - name = space.unwrap(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - return space.get(w_descr,w_type) - w_value = w_type.lookup(name) - if w_value is not None: - # __get__(None, type): turns e.g. 
functions into unbound methods - return space.get(w_value, space.w_None, w_type) - if w_descr is not None: - return space.get(w_descr,w_type) - raise OperationError(space.w_AttributeError,w_name) - -def setattr__Type_ANY_ANY(space, w_type, w_name, w_value): - name = space.unwrap(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.set(w_descr,w_type,space.type(w_type)) - w_type.dict_w[name] = w_value - -def delattr__Type_ANY(space, w_type, w_name): - name = space.unwrap(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.delete(w_descr, space.type(w_type)) - del w_type.dict_w[name] - -# XXX __delattr__ -# XXX __hash__ ?? - -def unwrap__Type(space, w_type): - if hasattr(w_type.instancetypedef, 'fakedcpytype'): - return w_type.instancetypedef.fakedcpytype - raise FailedToImplement - -# ____________________________________________________________ - -def compute_C3_mro(cls): - order = [] - orderlists = [list(base.mro_w) for base in cls.bases_w] - orderlists.append([cls] + cls.bases_w) - while orderlists: - for candidatelist in orderlists: - candidate = candidatelist[0] - if mro_blockinglist(candidate, orderlists) is None: - break # good candidate - else: - return mro_error(orderlists) # no candidate found - assert candidate not in order - order.append(candidate) - for i in range(len(orderlists)-1, -1, -1): - if orderlists[i][0] == candidate: - del orderlists[i][0] - if len(orderlists[i]) == 0: - del orderlists[i] - return order - -def mro_blockinglist(candidate, orderlists): - for lst in orderlists: - if candidate in lst[1:]: - return lst - return None # good candidate - -def mro_error(orderlists): - cycle = [] - candidate = orderlists[-1][0] - space = candidate.space - if candidate in orderlists[-1][1:]: - # explicit error message for this specific case - raise OperationError(space.w_TypeError, - space.wrap("duplicate base class " + 
candidate.name)) - while candidate not in cycle: - cycle.append(candidate) - nextblockinglist = mro_blockinglist(candidate, orderlists) - candidate = nextblockinglist[0] - del cycle[:cycle.index(candidate)] - cycle.append(candidate) - cycle.reverse() - names = [cls.name for cls in cycle] - raise OperationError(space.w_TypeError, - space.wrap("cycle among base classes: " + ' < '.join(names))) - -# ____________________________________________________________ - -register_all(vars()) diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py deleted file mode 100644 index ad2560e73f..0000000000 --- a/pypy/objspace/std/typetype.py +++ /dev/null @@ -1,55 +0,0 @@ -from pypy.objspace.std.stdtypedef import * -from pypy.objspace.std.dictproxyobject import dictproxy_descr - - -def descr__new__(space, w_typetype, w_name, w_bases, w_dict): - "This is used to create user-defined classes only." - from pypy.objspace.std.typeobject import W_TypeObject - # XXX check types - name = space.unwrap(w_name) - assert isinstance(name, str) - bases_w = space.unpackiterable(w_bases) - dict_w = {} - dictkeys_w = space.unpackiterable(w_dict) - for w_key in dictkeys_w: - key = space.unwrap(w_key) - assert isinstance(key, str) - dict_w[key] = space.getitem(w_dict, w_key) - w_type = space.allocate_instance(W_TypeObject, w_typetype) - w_type.__init__(space, name, bases_w or [space.w_object], dict_w) - return w_type - -def descr_get__mro__(space, w_type): - # XXX this should be inside typeobject.py - return space.newtuple(w_type.mro_w) - -def descr_mro(space, w_type): - return space.newlist(w_type.mro_w) - -def descr__bases(space, w_type): - return space.newtuple(w_type.bases_w) - -def descr__base(space, w_type): - if w_type is space.w_object: - return space.w_None - b = w_type.instancetypedef.base - if b is not None: - return space.gettypeobject(b) - else: - return space.w_object - -def descr__doc(space, w_type): - return w_type.dict_w.get('__doc__') - -# 
____________________________________________________________ - -type_typedef = StdTypeDef("type", - __new__ = newmethod(descr__new__), - __name__ = attrproperty('name'), - __bases__ = GetSetProperty(descr__bases), - __base__ = GetSetProperty(descr__base), - __mro__ = GetSetProperty(descr_get__mro__), - __dict__ = dictproxy_descr, - __doc__ = GetSetProperty(descr__doc), - mro = newmethod(descr_mro), - ) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py deleted file mode 100644 index f91fe1a8cd..0000000000 --- a/pypy/objspace/std/unicodeobject.py +++ /dev/null @@ -1,41 +0,0 @@ -from pypy.objspace.std.objspace import * -from pypy.objspace.std.fake import fake_type, wrap_exception -from pypy.objspace.std.stringobject import W_StringObject - -W_UnicodeObject = fake_type(unicode) - -# string-to-unicode delegation -def delegate__String(space, w_str): - return W_UnicodeObject(space, unicode(space.unwrap(w_str))) -delegate__String.result_class = W_UnicodeObject -delegate__String.priority = PRIORITY_CHANGE_TYPE -delegate__String.can_fail = True - -def eq__Unicode_ANY(space, w_uni, w_other): - try: - return space.newbool(space.unwrap(w_uni) == space.unwrap(w_other)) - except: - wrap_exception(space) - -def lt__Unicode_ANY(space, w_uni, w_other): - try: - return space.newbool(space.unwrap(w_uni) < space.unwrap(w_other)) - except: - wrap_exception(space) - -def ord__Unicode(space, w_uni): - try: - return space.wrap(ord(w_uni.val)) - except: - wrap_exception(space) - -def add__Unicode_Unicode(space, w_left, w_right): - return space.wrap(space.unwrap(w_left) + space.unwrap(w_right)) - -def contains__String_Unicode(space, w_left, w_right): - return space.wrap(space.unwrap(w_right) in space.unwrap(w_left)) - -def contains__Unicode_Unicode(space, w_left, w_right): - return space.wrap(space.unwrap(w_right) in space.unwrap(w_left)) - -register_all(vars()) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py deleted file 
mode 100644 index 9c1b0bd162..0000000000 --- a/pypy/objspace/std/unicodetype.py +++ /dev/null @@ -1,3 +0,0 @@ -from pypy.objspace.std.fake import fake_type - -unicode_typedef = fake_type(unicode).typedef diff --git a/pypy/objspace/test/__init__.py b/pypy/objspace/test/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/pypy/objspace/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/pypy/objspace/test/autopath.py b/pypy/objspace/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/objspace/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/objspace/test/test_descriptor.py b/pypy/objspace/test/test_descriptor.py deleted file mode 100644 index bc09fa8f84..0000000000 --- a/pypy/objspace/test/test_descriptor.py +++ /dev/null @@ -1,25 +0,0 @@ -import autopath - -class AppTest_Descriptor: - - def test_non_data_descr(self): - class X(object): - def f(self): - return 42 - x = X() - assert x.f() == 42 - x.f = 43 - assert x.f == 43 - del x.f - assert x.f() == 42 - - def test_member(self): - import sys - assert sys.stdin.softspace == 0 - assert file.softspace.__get__(sys.stdin) == 0 - sys.stdin.softspace = 1 - assert sys.stdin.softspace == 1 - file.softspace.__set__(sys.stdin, 0) - assert sys.stdin.softspace == 0 - raises(TypeError, delattr, sys.stdin, 'softspace') - raises(TypeError, file.softspace.__delete__, sys.stdin) diff --git a/pypy/objspace/test/test_traceobjspace.py b/pypy/objspace/test/test_traceobjspace.py deleted file mode 100644 index 64dffd1e7a..0000000000 --- 
a/pypy/objspace/test/test_traceobjspace.py +++ /dev/null @@ -1,70 +0,0 @@ -import autopath -from pypy.objspace import trace -from pypy.tool import pydis -from pypy.interpreter import gateway - -class Test_TraceObjSpace: - - def setup_method(self,method): - trace.create_trace_space(self.space) - - def teardown_method(self,method): - self.space.reset_trace() - - def perform_trace(self, app_func): - tspace = self.space - func_gw = gateway.app2interp_temp(app_func) - func = func_gw.get_function(tspace) - tspace.settrace() - tspace.call_function(tspace.wrap(func)) - res = tspace.getresult() - return res - - def test_traceobjspace_basic(self): - tspace = self.space - assert tspace.is_true(tspace.w_builtins) - #for name, value in vars(self.space).items(): - # if not name.startswith('_'): - # self.assert_(value is getattr(t, name)) - #self.assert_(t.is_true(t.make_standard_globals())) - - def test_simpletrace(self): - def app_f(): - pass - res = self.perform_trace(app_f) - disresult = pydis.pydis(app_f) - assert disresult.bytecodes == list(res.getbytecodes()) - - def test_some_builtin1(self): - def app_f(): - len([1,2,3,4,5]) - res = self.perform_trace(app_f) - disresult = pydis.pydis(app_f) - assert len(disresult.bytecodes) == len(list(res.getbytecodes())) - - def test_some_builtin2(self): - def app_f(): - filter(None, []) # filter implemented in appspace -> has many more bytecodes - res = self.perform_trace(app_f) - disresult = pydis.pydis(app_f) - assert len(disresult.bytecodes) < len(list(res.getbytecodes())) - - def get_operation(self, iter, optype, name): - for op in iter: - if isinstance(op, optype): - if op.callinfo.name == name: - return op - - def test_trace_oneop(self): - def app_f(): - x = 1 - x + 1 - res = self.perform_trace(app_f) - disresult = pydis.pydis(app_f) - uw = self.space.unwrap - ops = res.getoperations() - op_start = self.get_operation(ops, trace.CallBegin, "add") - args = [uw(x) for x in op_start.callinfo.args] - assert args == [1, 1] - op_end = 
self.get_operation(ops, trace.CallFinished, "add") - assert uw(op_end.res) == 2 diff --git a/pypy/objspace/trace.py b/pypy/objspace/trace.py deleted file mode 100644 index 4cdc1cafbd..0000000000 --- a/pypy/objspace/trace.py +++ /dev/null @@ -1,216 +0,0 @@ -""" - Trace object space traces operations and bytecode execution - in frames. - -""" - -from __future__ import generators -from pypy.tool import pydis -from pypy.interpreter.baseobjspace import ObjSpace - -# __________________________________________________________________________ -# -# Tracing Events -# __________________________________________________________________________ -# - -class ExecBytecode(object): - """ bytecode trace. """ - def __init__(self, frame): - self.frame = frame - self.code = frame.code - self.index = frame.next_instr - -class EnterFrame(object): - def __init__(self, frame): - self.frame = frame - -class LeaveFrame(object): - def __init__(self, frame): - self.frame = frame - -class CallInfo(object): - """ encapsulates a function call with its arguments. """ - def __init__(self, name, func, args, kwargs): - self.name = name - self.func = func - self.args = args - self.kwargs = kwargs - -class CallBegin(object): - def __init__(self, callinfo): - self.callinfo = callinfo - -class CallFinished(object): - def __init__(self, callinfo, res): - self.callinfo = callinfo - self.res = res - -class CallException(object): - def __init__(self, callinfo, e): - self.callinfo = callinfo - self.ex = e - -class TraceResult(object): - """ this is the state of tracing-in-progress. """ - def __init__(self, tracespace): - self.events = [] - self.tracespace = tracespace - - def append(self, arg): - self.events.append(arg) - - def getdisresult(self, frame, _cache = {}): - """ return (possibly cached) pydis result for the given frame. 
""" - try: - return _cache[id(frame.code)] - except KeyError: - res = _cache[id(frame.code)] = pydis.pydis(frame.code) - assert res is not None - return res - - def getbytecodes(self): - for event in self.events: - if isinstance(event, ExecBytecode): - disres = self.getdisresult(event.frame) - yield disres.getbytecode(event.index) - - def getoperations(self): - for event in self.events: - if isinstance(event, (CallBegin, CallFinished, CallException)): - yield event - - def getevents(self): - for event in self.events: - yield event - -# __________________________________________________________________________ -# -# Tracer Proxy objects -# __________________________________________________________________________ -# - -class ExecutionContextTracer(object): - def __init__(self, result, ec): - self.__ec = ec - self.__result = result - - def __getattr__(self, name): - """ generically pass through everything else ... """ - return getattr(self.__ec, name) - - def enter(self, frame): - """ called just before (continuing to) evaluating a frame. """ - self.__result.append(EnterFrame(frame)) - return self.__ec.enter(frame) - - def leave(self, previous_ec): - """ called just after evaluating of a frame is suspended/finished. """ - frame = self.__ec.framestack.top() - self.__result.append(LeaveFrame(frame)) - return self.__ec.leave(previous_ec) - - def bytecode_trace(self, frame): - """ called just before execution of a bytecode. 
""" - self.__result.append(ExecBytecode(frame)) - -class CallableTracer(object): - def __init__(self, result, name, func): - self.__result = result - self.__name = name - self.__func = func - - def __call__(self, *args, **kwargs): - callinfo = CallInfo(self.__name, self.__func, args, kwargs) - self.__result.append(CallBegin(callinfo)) - - try: - res = self.__func(*args, **kwargs) - except Exception, e: - self.__result.append(CallException(callinfo, e)) - raise - else: - self.__result.append(CallFinished(callinfo, res)) - return res - - def __getattr__(self, name): - """ generically pass through everything we don't intercept. """ - return getattr(self.__func, name) - - def __str__(self): - return "%s - CallableTracer(%s)" % (self.__name, self.__func) - __repr = __str__ -# __________________________________________________________________________ -# -# Tracer factory -# __________________________________________________________________________ -# - -operations = None -def get_operations(): - global operations - if operations is None: - operations = dict([(r[0], r[0]) for r in ObjSpace.MethodTable]) - for name in ["is_true", "newtuple", "newlist", "newstring", "newdict", - "newslice", "call_args", "is_", "get_and_call_function", - "wrap", "unwrap"]: - operations[name] = name - - return operations - -def create_trace_space(space = None, operations = None): - """ Will create a trace object space if no space supplied. 
Otherwise - will turn the supplied into a tracable space by extending its class.""" - - # Don't trace an already tracable space - if hasattr(space, "__pypytrace___"): - return space - - if space is None: - # make up a TrivialObjSpace by default - # ultimately, remove this hack and fix the -P option of tests - from pypy.objspace import trivial - space = trivial.TrivialObjSpace() - - if operations is None: - operations = get_operations() - - class Trace(space.__class__): - - def __getattribute__(self, name): - obj = super(Trace, self).__getattribute__(name) - if name in operations: - assert callable(obj) - obj = CallableTracer(self._result, name, obj) - return obj - - def __pypytrace___(self): - pass - - def settrace(self): - self._result = TraceResult(self) - - def getresult(self): - return self._result - - def getexecutioncontext(self): - ec = super(Trace, self).getexecutioncontext() - assert not isinstance(ec, ExecutionContextTracer) - return ExecutionContextTracer(self._result, ec) - - def reset_trace(self): - """ Returns the class to it's original form. """ - space.__class__ = space.__oldclass___ - del space.__oldclass___ - - if hasattr(self, "_result"): - del self._result - - trace_clz = type("Trace" + space.__class__.__name__, (Trace,), {}) - space.__oldclass___, space.__class__ = space.__class__, trace_clz - space.settrace() - return space - -# ______________________________________________________________________ -# End of trace.py - diff --git a/pypy/objspace/trivial.py b/pypy/objspace/trivial.py deleted file mode 100644 index a9efac348b..0000000000 --- a/pypy/objspace/trivial.py +++ /dev/null @@ -1,355 +0,0 @@ -# -# Trivial object space for testing -# Does not perform any wrapping and (more importantly) does not -# correctly wrap the exceptions. 
-# - -from pypy.interpreter import gateway -from pypy.interpreter.baseobjspace import * -from pypy.objspace.descroperation import DescrOperation, Object -from pypy.interpreter.argument import Arguments -import types, sys -import __builtin__ as cpy_builtin - -class CPyWrapper(object): - pass - -class TrivialObjSpace(ObjSpace, DescrOperation): - - def clone_exception_hierarchy(self): - def __init__(self, *args): - self.args = args - def __str__(self): - l = len(self.args) - if l == 0: - return '' - elif l == 1: - return str(self.args[0]) - else: - return str(self.args) - import exceptions - - # to create types, we should call the standard type object; - # but being able to do that depends on the existence of some - # of the exceptions... - - self.w_Exception = type('Exception', (), - {'__init__':__init__, '__str__': __str__, - 'originalex': Exception}) - - done = {'Exception': self.w_Exception} - - # some of the complexity of the following is due to the fact - # that we need to create the tree root first, but the only - # connections we have go in the inconvenient direction... 
- - for k in dir(exceptions): - if k not in done: - v = getattr(exceptions, k) - if not isinstance(v, type(Exception)): - continue - if not issubclass(v, Exception): - continue - stack = [k] - while stack: - next = stack[-1] - if next not in done: - v = getattr(exceptions, next) - b = v.__bases__[0] - if b.__name__ not in done: - stack.append(b.__name__) - continue - else: - base = done[b.__name__] - newtype = type(next, (base,), {}) - setattr(self, 'w_' + next, newtype) - newtype.originalex = v - done[next] = newtype - stack.pop() - else: - stack.pop() - return done - - def initialize(self): - from pypy.interpreter.typedef import TypeDef, GetSetProperty - - self.object_typedef = TypeDef('object', - __getattribute__ = gateway.interp2app(Object.descr__getattribute__.im_func), - __setattr__ = gateway.interp2app(Object.descr__setattr__.im_func), - __delattr__ = gateway.interp2app(Object.descr__delattr__.im_func), - __str__ = gateway.interp2app(lambda space, w_x: space.repr(w_x)), - __repr__ = gateway.interp2app(lambda space, w_x: repr(w_x)), - __class__ = GetSetProperty(self.__class__.type), - __init__ = gateway.interp2app(Object.descr__init__.im_func), - __dict__ = GetSetProperty(self.__class__.getdict_or_complain), - ) - # make a wrapped None object - none_typedef = TypeDef('NoneType', - __nonzero__ = gateway.interp2app(lambda space, w_None: - space.w_False), - __repr__ = gateway.interp2app(lambda space, w_None: - space.wrap('None'))) - nonewrapperclass = self.hackwrapperclass(none_typedef) - self.w_None = CPyWrapper.__new__(nonewrapperclass) - instancedict = CPyWrapper.__dict__['__dict__'].__get__(self.w_None) - instancedict['__internalpypyobject__'] = None - - self.w_True = True - self.w_False = False - self.w_NotImplemented = NotImplemented - self.w_Ellipsis = Ellipsis - newstuff = {"False": self.w_False, - "True" : self.w_True, - "NotImplemented" : self.w_NotImplemented, - "None" : self.w_None, - "Ellipsis" : self.w_Ellipsis, - "buffer": buffer, - #"xrange": 
xrange, - "slice": slice, - } - for n, c in cpy_builtin.__dict__.iteritems(): - if n in ['xrange', # we define this in builtin_app - 'staticmethod', - 'classmethod', - 'property', - ]: - continue - if isinstance(c, types.TypeType): - setattr(self, 'w_' + c.__name__, c) - newstuff[c.__name__] = c - newstuff.update(self.clone_exception_hierarchy()) - self.make_builtins(newstuff) - - # general stuff - def wrap(self, x): - if isinstance(x, BaseWrappable): - x = x.__spacebind__(self) - wrapperclass = self.hackwrapperclass(x.typedef) - instance = CPyWrapper.__new__(wrapperclass) - instancedict = CPyWrapper.__dict__['__dict__'].__get__(instance) - instancedict['__internalpypyobject__'] = x - return instance - elif x is None: - return self.w_None - else: - # optional check for double-wrapping - if isinstance(x, CPyWrapper): - raise TypeError, "wrapping an already-wrapped object" - # grumble grumble grumble recursive wrapping grumble - if isinstance(x, tuple): - return tuple([self.wrap(y) for y in x]) - return x - - def unwrap(self, w): - if isinstance(w, CPyWrapper): - instancedict = CPyWrapper.__dict__['__dict__'].__get__(w) - return instancedict['__internalpypyobject__'] - else: - return w - - unwrap_builtin = unwrap - - def getdict(self, w_obj): - if isinstance(w_obj, CPyWrapper): - obj = self.unwrap(w_obj) - return obj.getdict() - else: - try: - return w_obj.__dict__ - except: - self.reraise() - - def getdict_or_complain(self, w_obj): - result = self.getdict(w_obj) - if result is None: - raise OperationError(self.w_AttributeError, - self.wrap('no __dict__')) - return result - - def allocate_instance(self, cls, w_subtype): - raise NotImplementedError("cannot manually instantiate built-in types") - - def hackwrapperclass(self, typedef): - try: - return typedef.trivialwrapperclass - except AttributeError: - from pypy.interpreter.gateway import interp2app - - # make the base first - if typedef.base: - bases = (self.hackwrapperclass(typedef.base),) - else: - bases = 
(CPyWrapper,) - # make the class dict with descriptors redirecting to the ones - # in rawdict - descrdict = {'__internalpypytypedef__': typedef} - for descrname, descr in typedef.rawdict.items(): - if isinstance(descr, interp2app): - def make_stuff(descr=descr, descrname=descrname, space=self): - def stuff(w_obj, *args, **kwds): - fn = descr.get_function(space) - args = Arguments(space, list(args), kwds) - try: - return space.call_args(space.wrap(fn), - args.prepend(w_obj)) - except OperationError, e: - if not hasattr(e.w_type, 'originalex'): - raise # XXX - # XXX normalize ... - #if isinstance(e.w_value, e.w_type): - raise e.w_type.originalex(repr(e.w_value)) # e.w_value) - return stuff - descrdict[descrname] = make_stuff() - else: - # more generally, defining a property - def fget(w_obj, descr=descr, space=self): - w_descr = space.wrap(descr) - return space.get(w_descr, w_obj) - def fset(w_obj, w_value, descr=descr, space=self): - w_descr = space.wrap(descr) - return space.set(w_descr, w_obj, w_value) - def fdel(w_obj, descr=descr, space=self): - w_descr = space.wrap(descr) - return space.delete(w_descr, w_obj) - descrdict[descrname] = property(fget, fset, fdel) - cls = type('CPyWrapped '+typedef.name, bases, descrdict) - typedef.trivialwrapperclass = cls - return cls - - def is_(self, w_obj1, w_obj2): - return self.unwrap(w_obj1) is self.unwrap(w_obj2) - - def id(self, w_obj): - return id(self.unwrap(w_obj)) - - def unpacktuple(self, w_tuple, expected_length=None): - assert isinstance(w_tuple, tuple) - if expected_length is not None and expected_length != len(w_tuple): - raise ValueError, "got a tuple of length %d instead of %d" % ( - len(w_tuple), expected_length) - return list(w_tuple) - - def reraise(self): - #import traceback - #traceback.print_exc() - #ec = self.getexecutioncontext() # .framestack.items[-1] - #ec.print_detailed_traceback(self) - etype, evalue, etb = sys.exc_info() - if etype is OperationError: - raise etype, evalue, etb # just re-raise it - 
name = etype.__name__ - if hasattr(self, 'w_' + name): - nt = getattr(self, 'w_' + name) - nv = object.__new__(nt) - if isinstance(evalue, etype): - nv.args = evalue.args - else: - print [etype, evalue, nt, nv], - print '!!!!!!!!' - nv.args = (evalue,) - else: - nt = etype - nv = evalue - raise OperationError, OperationError(nt, nv), etb - - # from the built-ins - def issubtype(self, w_x, w_y): - try: - return issubclass(w_x, w_y) - except: - self.reraise() - - def newtuple(self, args_w): - return tuple(args_w) - - def newlist(self, args_w): - return list(args_w) - - def newdict(self, items_w): - try: - return dict(items_w) - except: - self.reraise() - - def newslice(self, *args_w): - try: - return slice(*args_w) - except: - self.reraise() - - def is_true(self, w_obj): - return not not w_obj - - def not_(self, w_obj): - return not w_obj - - def type(self, w_x): - return type(w_x) - - def ord(self, w_x): - try: - return ord(w_x) - except: - self.reraise() - - def round(self, w_x): - try: - return round(w_x) - except: - self.reraise() - - def iter(self, w_obj): - if isinstance(w_obj, str) and not hasattr(w_obj, '__iter__'): - return iter(w_obj) # str.__iter__ is missing in CPython - else: - return DescrOperation.iter(self, w_obj) - - def newstring(self, asciilist): - try: - return ''.join([chr(ascii) for ascii in asciilist]) - except: - self.reraise() - - def newseqiter(self, w_obj): - try: - return iter(w_obj) - except: - self.reraise() - - def lookup(space, w_obj, name): - assert not isinstance(w_obj, BaseWrappable) - if isinstance(w_obj, CPyWrapper): - typedef = type(w_obj).__internalpypytypedef__ - while typedef is not None: - if name in typedef.rawdict: - return space.wrap(typedef.rawdict[name]) - typedef = typedef.base - if name in space.object_typedef.rawdict: - return space.wrap(space.object_typedef.rawdict[name]) - return None - else: - for cls in w_obj.__class__.__mro__: - if name in cls.__dict__: - return cls.__dict__[name] - return None - - def 
get_and_call_args(self, w_descr, w_obj, args): - if isinstance(w_descr, CPyWrapper): - return DescrOperation.get_and_call_args(self, w_descr, w_obj, args) - else: - try: - obj = self.unwrap(w_obj) - if hasattr(w_descr, '__get__'): - obj = w_descr.__get__(obj, type(obj)) - args_w, kwds_w = args.unpack() - return obj(*args_w, **kwds_w) - except: - #import traceback; traceback.print_exc() - self.reraise() - - -for m in ObjSpace.MethodTable: - if not hasattr(TrivialObjSpace, m[0]): - print 'XXX there is no', m[0], 'in TrivialObjSpace' - -Space = TrivialObjSpace diff --git a/pypy/test_all.py b/pypy/test_all.py deleted file mode 100755 index eda6bbb30d..0000000000 --- a/pypy/test_all.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python - -if __name__ == '__main__': - import tool.autopath - from py.__impl__.test.cmdline import main - main() diff --git a/pypy/tool/__init__.py b/pypy/tool/__init__.py deleted file mode 100644 index ea30561d83..0000000000 --- a/pypy/tool/__init__.py +++ /dev/null @@ -1 +0,0 @@ -#empty diff --git a/pypy/tool/autopath.py b/pypy/tool/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/tool/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/tool/bltinchecker.py b/pypy/tool/bltinchecker.py deleted file mode 100644 index 24e583a9d0..0000000000 --- a/pypy/tool/bltinchecker.py +++ /dev/null @@ -1,80 +0,0 @@ -#python imports -try: - import readline - import rlcompleter - have_readline = True - -except: - have_readline = False - -import __builtin__ as cpy_builtin - -#pypy imports -import autopath -from pypy.objspace.std import Space -from pypy.interpreter.baseobjspace import OperationError - -def found_missing(name, attrs, w_obj, space): - #we cannot do a dir on a pypy wrapped object - found_attrs = [] - missing_attrs = [] - - for a in attrs: - try: - try: - w_name = space.wrap(a) - t = space.getattr(w_obj, w_name) - found_attrs.append(a) - except OperationError: #over-broad? 
- missing_attrs.append(a) - - except: #something horrible happened - print 'In builtin: ', name, 'Blew Up Trying to get attr: ', repr(a) - missing_attrs.append(a) - - return found_attrs, missing_attrs - -def check_for_attrs(name, space): - try: - cpy_attrs = dir(getattr(cpy_builtin, name)) - except AttributeError: - print "AttributeError: CPython module __builtin__ has no attribute '%s'" % name - return [], [] - - try: - w_obj = space.getitem(space.w_builtins, space.wrap(name)) - except OperationError: - print "AttributeError: PyPy space %s builtins has no attribute '%s'" % (space, name) - return [], cpy_attrs - - return found_missing(name, cpy_attrs, w_obj, space) - - -def print_report(names, space): - - for n in names: - found, missing = check_for_attrs(n, space) - print - print "For the Builtin Function or Type: '%s'" % n - print - print 'Found Attributes: \t', found - print - print 'Missing Attributes: \t', missing - print - print '-------------------------' - -if __name__ == '__main__': - - from pypy.tool import option - from pypy.tool import test - args = option.process_options(option.get_standard_options(), - option.Options) - - - # Create objspace... 
- objspace = option.objspace() - - #names = dir(cpy_builtin) - names = ['abs', 'xxx', 'unicode', 'enumerate', 'int'] - - print_report(names, objspace) diff --git a/pypy/tool/cache.py b/pypy/tool/cache.py deleted file mode 100644 index bfcffea357..0000000000 --- a/pypy/tool/cache.py +++ /dev/null @@ -1,35 +0,0 @@ - -# hacks += 1 -class Cache: - frozen = True - - def __init__(self): - self.content = {} - self.frozen = False - - def __hash__(self): - if not self.frozen: - #raise TypeError, "cannot get hash of un-frozen cache" - self.freeze() - return id(self) - - def clear(self): - if self.frozen: - raise TypeError, "cannot clear frozen cache" - self.content.clear() - - def getorbuild(self, key, builder, stuff): - try: - return self.content[key] - except KeyError: - assert not self.frozen, "cannot build %r, cache already frozen" % key - result = builder(key, stuff) - #assert key not in self.content, "things messed up" - self.content[key] = result - return result - # note to annotator: we want loadfromcache() to be - # specialized for the different cache types - getorbuild._specialize_ = "location" - - def freeze(self): - del self.frozen diff --git a/pypy/tool/dotypes.py b/pypy/tool/dotypes.py deleted file mode 100644 index cb8e2bd7e5..0000000000 --- a/pypy/tool/dotypes.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python - -import types -import __builtin__ -builtinitems = vars(__builtin__).iteritems -import sys - -typesdone = {} -exports = [] - -def dotype(synonym): - typeobject = getattr(types, synonym) - if type(typeobject) is not type: return - exports.append(synonym) - if typeobject in typesdone: - print 'setattr(_types, %r, %s)' % (synonym, typeobject.__name__) - print - return - typesdone[typeobject] = 1 - - typename = typeobject.__name__ - typetitle = typename.title() - print 'class %s(object):' % typename - print - print ''' def __new__(cls, *args): - if cls is %s: - return pypy.%sObjectFactory(args) - else: - return pypy.UserObjectFactory(cls, pypy.%sObjectFactory, args) - - def __repr__(self): - return str(self) -''' % (typename, typetitle, typetitle) - - sys.stdout.write('_register(pypy.%sObjectFactory, %s'%(typetitle, typename)) - - for n, v in builtinitems(): - if v is typeobject: - if n != typename: - sys.stdout.write(', in_builtin=%r' % n) - break - else: - sys.stdout.write(', in_builtin=False') - - default_synonym = typetitle + 'Type' - if synonym != default_synonym: - sys.stdout.write(', synonym=%r' % synonym) - sys.stdout.write(')\n') - - print - print - -for synonym in dir(types): - dotype(synonym) -print -print '__all__ = %r' % exports - diff --git a/pypy/tool/example_pytest.py b/pypy/tool/example_pytest.py deleted file mode 100644 index 805ac94b6b..0000000000 --- a/pypy/tool/example_pytest.py +++ /dev/null @@ -1,26 +0,0 @@ -class AppTestTest: - def test_app_method(self): - assert 42 == 41 - -def app_test_app_func(): - assert 41 == 42 - -def test_interp_func(space): - assert space.is_true(space.w_None) - -class TestInterpTest: - def test_interp_method(self): - assert self.space.is_true(self.space.w_False) - -def app_test_raises_something(): - int("hallo") - -def app_test_raises_wrong1(): - raises(SyntaxError, 'int("hello")') - -def app_test_raises_wrong2(): - raises(SyntaxError, int, "hello") - -def app_test_raises_doesnt(): - 
raises(ValueError, int, 3) - diff --git a/pypy/tool/fixeol b/pypy/tool/fixeol deleted file mode 100755 index 568f2925c9..0000000000 --- a/pypy/tool/fixeol +++ /dev/null @@ -1,86 +0,0 @@ -#! /usr/bin/env python - -import sys, os - - -forbidden = range(0,32) -forbidden.remove(9) # tab -forbidden.remove(10) # lf -forbidden.remove(12) # ff, ^L -forbidden.remove(13) # cr - - -def looksbinary(data, forbidden = [chr(i) for i in forbidden]): - "Check if some data chunk appears to be binary." - for c in forbidden: - if c in data: - return True - return False - - -def binary2text(filename): - "Convert a file to the platform's native end-of-line format if needed." - f = open(filename, 'rb') - data = f.read() - f.close() - if looksbinary(data): - return False - original = data - data = data.replace('\r\n', '\n') - data = data.replace('\r', '\n') - data = data.replace('\n', os.linesep) - if data != original: - f = open(filename, 'wb') - f.write(data) - f.close() - return True - - -def asserttextfile(fname): - "Assert a file is a text file or issue a warning otherwise." - # safety check to nail binary files - try: - if not binary2text(fname): - print >> sys.stderr, "*** warning, looks like a binary file:", - print >> sys.stderr, fname - return - except IOError, e: - print "skipping %r because of %s" %(fname, e) - else: - # change end-of-line style of each .py and .txt file to 'native' - os.system('svn propset svn:eol-style native %s' % fname) - - -def fixpyfiles(ignored, dirname, fnames): - "Fix Python files in some directory." 
- numpyfiles = 0 - for fname in fnames: - if fname.endswith('.py') or fname.endswith('.txt') or fname.endswith('.asc'): - asserttextfile(os.path.join(dirname, fname)) - numpyfiles += 1 - if numpyfiles: - # ignore '*.pyc' and '*.pyo' in any directory containing .py files - g = os.popen('svn propget svn:ignore %s' % dirname) - content = g.readlines() - g.close() - oldlen = len(content) - if '*.pyc\n' not in content: - content.append('*.pyc\n') - if '*.pyo\n' not in content: - content.append('*.pyo\n') - if len(content) > oldlen: - g = open('svn-ignore.tmp', 'w') - g.writelines(content) - g.close() - os.system('svn propset svn:ignore -F svn-ignore.tmp %s' % dirname) - os.unlink('svn-ignore.tmp') - if '.svn' in fnames: - fnames.remove('.svn') - - -if __name__ == '__main__': - if len(sys.argv) > 1: - for fname in sys.argv[1:]: - asserttextfile(fname) - else: - os.path.walk(os.curdir, fixpyfiles, None) diff --git a/pypy/tool/fiximport.py b/pypy/tool/fiximport.py deleted file mode 100644 index a00f26ec57..0000000000 --- a/pypy/tool/fiximport.py +++ /dev/null @@ -1,188 +0,0 @@ -import sys -import re -import parser - -blank_re=re.compile(r"\s*(#.*)?") - -def get_indent(line): - indent = re.match(blank_re,line).group() - return indent, indent==line - -def read_whole_suite(intro_line): - base_indent, dummy = get_indent(intro_line) - lines = [] - cont = [] - parsing_prefix = "" - if len(base_indent) > 0: - parsing_prefix = "if 0:\n" - while True: - line = f.readline() - if not line: - break - indent, isblank = get_indent(line) - if isblank: - pass - elif cont: - cont.append(line) - try: - parser.suite(parsing_prefix+''.join(cont)) - except SyntaxError: - pass - else: - cont = [] - else: - if len(indent) <= len(base_indent): - pushback.append(line) - break - try: - parser.suite(parsing_prefix+line) - except SyntaxError: - cont = [line] - lines.append(line) - - return base_indent,lines - -pass_re = re.compile(r"^\s*pass\s*$") -getobjspace_re = 
re.compile(r"testit\.objspace\((.*)\)") -setspace_re = re.compile(r"self\.space\s*=\s*") - -def up_down_port(lines): - npass = 0 - nblank = 0 - objspace = None - n = -1 - for line in lines: - n += 1 - dummy, isblank = get_indent(line) - if isblank: - nblank += 1 - continue - if re.match(pass_re, line): - npass += 1 - continue - m = re.search(getobjspace_re, line) - if m: - objspace = m.group(1) - line = line[:m.start()]+"self.space"+line[m.end():] - line = re.sub(setspace_re,"",line) - if line.strip() == "self.space": - line = "" - nblank += 1 - lines[n] = line - - skip = npass+nblank == len(lines) - - return objspace,skip - - -# option -o let you pick a default objspace which will be enforced -# trough a module global objspacename = - -default_objspace = None - -if sys.argv[1].startswith('-o'): - default_objspace = sys.argv[1][2:] - files = sys.argv[2:] -else: - files = sys.argv[1:] - -print "-- default-objspace: %s" % default_objspace - -for fn in files: - print fn - lines = [] - pushback = [] - kind = None - f = file(fn, 'r') - - global_objspacename = False - confused = False - - import_autopath_lineno = -1 - - while True: - if pushback: - line = pushback.pop() - else: - line = f.readline() - if not line: - break - rline = line.rstrip() - if rline == 'from pypy.tool import testit': - continue - if (rline == "if __name__ == '__main__':" or - rline == 'if __name__ == "__main__":'): - tail = f.read() - if tail.strip() != 'testit.main()': - print ' * uncommon __main__ lines at the end' - confused = True - break - if default_objspace and not global_objspacename and (line.startswith('def ') or line.startswith('class ')): - lines.extend(["objspacename = %r\n" % default_objspace,"\n"]) - global_objspacename = True - - if line.strip() == 'def setUp(self):': - base_indent,suite = read_whole_suite(line) - objspace,skip = up_down_port(suite) - #print suite - if objspace: - if default_objspace: - if eval(objspace) != default_objspace: - print "* default objspace mismatch: 
%s" % objspace - confused = True - continue - else: - lines.append(base_indent+"objspacename = %s\n" % objspace) - lines.append("\n") - if not skip: - lines.append(base_indent+"def setup_method(self,method):\n") - lines.extend(suite) - continue - if line.strip() == 'def tearDown(self):': - base_indent, suite = read_whole_suite(line) - unexpected,skip = up_down_port(suite) - if unexpected is not None: - print "* testit.objspace(<name>) in tearDown" - confused = True - #print suite - if not skip: - lines.append(base_indent+"def teardown_method(self,method):\n") - lines.extend(suite) - continue - if line.startswith('class '): - rest = line[6:].strip() - if rest.endswith('(testit.AppTestCase):'): - rest = rest[:-21].strip() + ':' - if not rest.startswith('AppTest'): - if not rest.startswith('Test'): - rest = 'Test'+rest - rest = 'App'+rest - kind = 'app-test' - elif rest.endswith('(testit.IntTestCase):'): - rest = rest[:-21].strip() + ':' - if not rest.startswith('Test'): - rest = 'Test'+rest - kind = 'test' - elif rest.endswith('(testit.TestCase):'): - rest = rest[:-18].strip() + ':' - if not rest.startswith('Test'): - rest = 'Test'+rest - kind = 'test' - else: - print ' * ignored class', rest - line = 'class ' + rest + '\n' - if line.rstrip() == "import autopath": - import_autopath_lineno = len(lines) - lines.append(line) - f.close() - - while lines and not lines[-1].strip(): - del lines[-1] - - if confused: - print "** confused: file not changed" - else: - #sys.stdout.writelines(lines) - f = file(fn, 'w') - f.writelines(lines) - f.close() diff --git a/pypy/tool/hack.py b/pypy/tool/hack.py deleted file mode 100644 index 02525889d5..0000000000 --- a/pypy/tool/hack.py +++ /dev/null @@ -1,24 +0,0 @@ -import autopath - -import new, sys - -if sys.version_info >= (2, 3): - - def func_with_new_name(func, newname): - f = new.function(func.func_code, func.func_globals, - newname, func.func_defaults, - func.func_closure) - if func.func_dict: - f.func_dict = {} - 
f.func_dict.update(func.func_dict) - return f - -else: - - def func_with_new_name(func, newname): - return func - -if __name__ == '__main__': - def f(): pass - g = func_with_new_name(f, 'g') - print g.__name__ diff --git a/pypy/tool/methodChecker.py b/pypy/tool/methodChecker.py deleted file mode 100644 index 7435a39ca2..0000000000 --- a/pypy/tool/methodChecker.py +++ /dev/null @@ -1,47 +0,0 @@ -import autopath -from pypy.objspace.std import Space -from pypy.interpreter.main import eval_string - -class MethodChecker(object): - """ Checks which methods are available on builtin types.""" - - def __init__(self, types=(1, 1.0, "'a'", [], {}, (), None)): - - space = Space() - str = ['-', 'Implemented'] - - totalImplemented = 0 - totalNotImplemented = 0 - - for oneType in types: - subImplemented = 0 - subNotImplemented = 0 - - attribArr = dir(type(oneType)) - for attrib in attribArr: - x = space.unwrap(eval_string( - 'hasattr(%s,"%s")\n' % (oneType, attrib), - '<string>', space)) - print '%-16s%-18s%s' % (type(oneType), attrib, str[x]) - if x: - subImplemented += 1 - totalImplemented += 1 - else: - subNotImplemented += 1 - totalNotImplemented += 1 - print - print ' %-16s Implemented: %3d' % (type(oneType), - subImplemented) - print ' %-16s Not implemented: %3d' % (type(oneType), - subNotImplemented) - print ' %-16s TOTAL: %3d' % ( - type(oneType), subNotImplemented + subImplemented) - print - - print 'TOTAL Implemented: %3d' % totalImplemented - print 'TOTAL Not implemented: %3d' % totalNotImplemented - print 'GRAND TOTAL: %3d' % ( - totalNotImplemented + totalImplemented) - -if __name__ == '__main__': - MethodChecker() diff --git a/pypy/tool/modanalyze.py b/pypy/tool/modanalyze.py deleted file mode 100644 index 734d6b395e..0000000000 --- a/pypy/tool/modanalyze.py +++ /dev/null @@ -1,84 +0,0 @@ -""" - This module analyzes the delta between the attributes of a module - as seen by the PyPy interpreter, and the same module as seen by the - CPython interpreter. 
It can be used to help insure that pure-python - functionality as provided by PyPy matches the original functionality - provided by CPython. - - This module may be used as a standalone script, with modules to be - analyzed listed on the command line (default if none given is to - analyze __builtin__) or the moduledelta() function may be called - from other modules. When used standalone, it always analyzes - using the standard object space. - - The current implementation does not examine function signatures -- - it only shows the attributes which are exclusively in one module - or the other. - - The current implementation also may not work for non-builtin extension - modules, depending on whether there is a different path variable - inside PyPy, or whether the presence of the PyPy pure python module - will shadow the original C module, making it unavailable for comparison. -""" - -import autopath -from pypy.interpreter.gateway import app2interp -from sets import Set - -def app_getmodattributes(modname): - """ Return the attributes of the named module """ - pypy_module = __import__(modname,globals(),None,[]) - return pypy_module.__dict__.keys() - -def moduledelta(space,modname): - """ - moduledelta(space,modname) imports the module from inside - the given space, and also from CPython, and returns a tuple - (missing,extra) which describes attributes in CPython but - not in PyPy, and the opposite. 
- """ - - wrapped_func = app2interp(app_getmodattributes).get_function(space) - - pypy_module = wrapped_func(space.wrap(modname)) - pypy_module = space.unpackiterable(pypy_module) - pypy_module = [space.unwrap(x) for x in pypy_module] - pypy_module = Set(pypy_module) - - c_module = __import__(modname,globals(),None,[]) - c_module = Set(c_module.__dict__.keys()) | Set(['__dict__','__new__']) - diff = c_module ^ pypy_module - missing = list(diff & c_module) - extra = list(diff & pypy_module) - missing.sort() - extra.sort() - return missing,extra - -if __name__ == '__main__': - from sys import argv - from pypy.objspace.std import StdObjSpace - - def showdiff(name,stuff): - if stuff: - print - print name - for i in stuff: print " ",i - print - - modlist = argv[1:] - if not modlist: - print "modanalyze <module> [<module> ...]" - print - print "Analyzing __builtin__ by default" - print - modlist = ['__builtin__'] - - print "Initializing std object space" - std = StdObjSpace() - - for modname in modlist: - print - print 'Comparing %s module' % modname - missing,extra = moduledelta(std,modname) - showdiff(" Missing from PyPy",missing) - showdiff(" Extra in PyPy",extra) diff --git a/pypy/tool/opcode.py b/pypy/tool/opcode.py deleted file mode 100644 index 94fcf61365..0000000000 --- a/pypy/tool/opcode.py +++ /dev/null @@ -1,191 +0,0 @@ - -""" -opcode module - potentially shared between dis and other modules which -operate on bytecodes (e.g. peephole optimizers). 
-""" - -__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", - "haslocal", "hascompare", "hasfree", "opname", "opmap", - "HAVE_ARGUMENT", "EXTENDED_ARG"] - -cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is', - 'is not', 'exception match', 'BAD') - -hasconst = [] -hasname = [] -hasjrel = [] -hasjabs = [] -haslocal = [] -hascompare = [] -hasfree = [] - -opmap = {} -opname = [''] * 256 -for op in range(256): opname[op] = '<' + `op` + '>' -del op - -def def_op(name, op): - opname[op] = name - opmap[name] = op - -def name_op(name, op): - def_op(name, op) - hasname.append(op) - -def jrel_op(name, op): - def_op(name, op) - hasjrel.append(op) - -def jabs_op(name, op): - def_op(name, op) - hasjabs.append(op) - -# Instruction opcodes for compiled code - -def_op('STOP_CODE', 0) -def_op('POP_TOP', 1) -def_op('ROT_TWO', 2) -def_op('ROT_THREE', 3) -def_op('DUP_TOP', 4) -def_op('ROT_FOUR', 5) -def_op('IEXEC_THROW', 6) - -def_op('UNARY_POSITIVE', 10) -def_op('UNARY_NEGATIVE', 11) -def_op('UNARY_NOT', 12) -def_op('UNARY_CONVERT', 13) - -def_op('UNARY_INVERT', 15) - -def_op('BINARY_POWER', 19) - -def_op('BINARY_MULTIPLY', 20) -def_op('BINARY_DIVIDE', 21) -def_op('BINARY_MODULO', 22) -def_op('BINARY_ADD', 23) -def_op('BINARY_SUBTRACT', 24) -def_op('BINARY_SUBSCR', 25) -def_op('BINARY_FLOOR_DIVIDE', 26) -def_op('BINARY_TRUE_DIVIDE', 27) -def_op('INPLACE_FLOOR_DIVIDE', 28) -def_op('INPLACE_TRUE_DIVIDE', 29) - -def_op('SLICE+0', 30) -def_op('SLICE+1', 31) -def_op('SLICE+2', 32) -def_op('SLICE+3', 33) - -def_op('STORE_SLICE+0', 40) -def_op('STORE_SLICE+1', 41) -def_op('STORE_SLICE+2', 42) -def_op('STORE_SLICE+3', 43) - -def_op('DELETE_SLICE+0', 50) -def_op('DELETE_SLICE+1', 51) -def_op('DELETE_SLICE+2', 52) -def_op('DELETE_SLICE+3', 53) - -def_op('INPLACE_ADD', 55) -def_op('INPLACE_SUBTRACT', 56) -def_op('INPLACE_MULTIPLY', 57) -def_op('INPLACE_DIVIDE', 58) -def_op('INPLACE_MODULO', 59) -def_op('STORE_SUBSCR', 60) -def_op('DELETE_SUBSCR', 61) - 
-def_op('BINARY_LSHIFT', 62) -def_op('BINARY_RSHIFT', 63) -def_op('BINARY_AND', 64) -def_op('BINARY_XOR', 65) -def_op('BINARY_OR', 66) -def_op('INPLACE_POWER', 67) -def_op('GET_ITER', 68) - -def_op('PRINT_EXPR', 70) -def_op('PRINT_ITEM', 71) -def_op('PRINT_NEWLINE', 72) -def_op('PRINT_ITEM_TO', 73) -def_op('PRINT_NEWLINE_TO', 74) -def_op('INPLACE_LSHIFT', 75) -def_op('INPLACE_RSHIFT', 76) -def_op('INPLACE_AND', 77) -def_op('INPLACE_XOR', 78) -def_op('INPLACE_OR', 79) -def_op('BREAK_LOOP', 80) -def_op('END_IEXEC', 81) - -def_op('LOAD_LOCALS', 82) -def_op('RETURN_VALUE', 83) -def_op('IMPORT_STAR', 84) -def_op('EXEC_STMT', 85) -def_op('YIELD_VALUE', 86) - -def_op('POP_BLOCK', 87) -def_op('END_FINALLY', 88) -def_op('BUILD_CLASS', 89) - -HAVE_ARGUMENT = 90 # Opcodes from here have an argument: - -name_op('STORE_NAME', 90) # Index in name list -name_op('DELETE_NAME', 91) # "" -def_op('UNPACK_SEQUENCE', 92) # Number of tuple items -jrel_op('FOR_ITER', 93) - -name_op('STORE_ATTR', 95) # Index in name list -name_op('DELETE_ATTR', 96) # "" -name_op('STORE_GLOBAL', 97) # "" -name_op('DELETE_GLOBAL', 98) # "" -def_op('DUP_TOPX', 99) # number of items to duplicate -def_op('LOAD_CONST', 100) # Index in const list -hasconst.append(100) -name_op('LOAD_NAME', 101) # Index in name list -def_op('BUILD_TUPLE', 102) # Number of tuple items -def_op('BUILD_LIST', 103) # Number of list items -def_op('BUILD_MAP', 104) # Always zero for now -name_op('LOAD_ATTR', 105) # Index in name list -def_op('COMPARE_OP', 106) # Comparison operator -hascompare.append(106) -name_op('IMPORT_NAME', 107) # Index in name list -name_op('IMPORT_FROM', 108) # Index in name list - -jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip -jrel_op('JUMP_IF_FALSE', 111) # "" -jrel_op('JUMP_IF_TRUE', 112) # "" -jabs_op('JUMP_ABSOLUTE', 113) # Target byte offset from beginning of code - -name_op('LOAD_GLOBAL', 116) # Index in name list - -jabs_op('CONTINUE_LOOP', 119) # Target address -jrel_op('SETUP_LOOP', 120) # 
Distance to target address -jrel_op('SETUP_EXCEPT', 121) # "" -jrel_op('SETUP_FINALLY', 122) # "" -jrel_op('SETUP_IEXEC', 123) # "" - -def_op('LOAD_FAST', 124) # Local variable number -haslocal.append(124) -def_op('STORE_FAST', 125) # Local variable number -haslocal.append(125) -def_op('DELETE_FAST', 126) # Local variable number -haslocal.append(126) - -def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3) -def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8) -def_op('MAKE_FUNCTION', 132) # Number of args with default values -def_op('BUILD_SLICE', 133) # Number of items - -def_op('MAKE_CLOSURE', 134) -def_op('LOAD_CLOSURE', 135) -hasfree.append(135) -def_op('LOAD_DEREF', 136) -hasfree.append(136) -def_op('STORE_DEREF', 137) -hasfree.append(137) - -def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8) -def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8) -def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8) - -def_op('EXTENDED_ARG', 143) -EXTENDED_ARG = 143 - -del def_op, name_op, jrel_op, jabs_op diff --git a/pypy/tool/optik.py b/pypy/tool/optik.py deleted file mode 100644 index f74c669068..0000000000 --- a/pypy/tool/optik.py +++ /dev/null @@ -1,1667 +0,0 @@ -"""optik - -A powerful, extensible, and easy-to-use command-line parser for Python. - -By Greg Ward <gward@python.net> - -See http://optik.sourceforge.net/ -""" - -"""optik.errors - -Exception classes used by Optik. -""" - -# Copyright (c) 2001-2003 Gregory P. Ward. All rights reserved. -# See the README.txt distributed with Optik for licensing terms. - -# created 2001/10/17 GPW (from optik.py) - -import sys -import types, os -import string, re - -class OptikError (Exception): - def __init__ (self, msg): - self.msg = msg - - def __str__ (self): - return self.msg - - -class OptionError (OptikError): - """ - Raised if an Option instance is created with invalid or - inconsistent arguments. 
- """ - - def __init__ (self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__ (self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - -class OptionConflictError (OptionError): - """ - Raised if conflicting options are added to an OptionParser. - """ - -class OptionValueError (OptikError): - """ - Raised if an invalid option value is encountered on the command - line. - """ - -class BadOptionError (OptikError): - """ - Raised if an invalid or ambiguous option is seen on the command-line. - """ - - -class HelpFormatter: - - """ - Abstract base class for formatting option help. OptionParser - instances should use one of the HelpFormatter subclasses for - formatting help; by default IndentedHelpFormatter is used. - - Instance attributes: - indent_increment : int - the number of columns to indent per nesting level - max_help_position : int - the maximum starting column for option help text - help_position : int - the calculated starting column for option help text; - initially the same as the maximum - width : int - total number of columns for output - level : int - current indentation level - current_indent : int - current indentation level (in columns) - help_width : int - number of columns available for option help text (calculated) - """ - - def __init__ (self, - indent_increment, - max_help_position, - width, - short_first): - self.indent_increment = indent_increment - self.help_position = self.max_help_position = max_help_position - self.width = width - self.current_indent = 0 - self.level = 0 - self.help_width = width - max_help_position - self.short_first = short_first - - def indent (self): - self.current_indent += self.indent_increment - self.level += 1 - - def dedent (self): - self.current_indent -= self.indent_increment - assert self.current_indent >= 0, "Indent decreased below 0." 
- self.level -= 1 - - def format_usage (self, usage): - raise NotImplementedError, "subclasses must implement" - - def format_heading (self, heading): - raise NotImplementedError, "subclasses must implement" - - def format_description (self, description): - desc_width = self.width - self.current_indent - indent = " "*self.current_indent - return fill(description, desc_width, - initial_indent=indent, - subsequent_indent=indent) - - def format_option (self, option): - # The help for each option consists of two parts: - # * the opt strings and metavars - # eg. ("-x", or "-fFILENAME, --file=FILENAME") - # * the user-supplied help string - # eg. ("turn on expert mode", "read data from FILENAME") - # - # If possible, we write both of these on the same line: - # -x turn on expert mode - # - # But if the opt string list is too long, we put the help - # string on a second line, indented to the same column it would - # start in if it fit on the first line. - # -fFILENAME, --file=FILENAME - # read data from FILENAME - result = [] - opts = option.option_strings - opt_width = self.help_position - self.current_indent - 2 - if len(opts) > opt_width: - opts = "%*s%s\n" % (self.current_indent, "", opts) - indent_first = self.help_position - else: # start help on same line as opts - opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) - indent_first = 0 - result.append(opts) - if option.help: - help_lines = wrap(option.help, self.help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) - result.extend(["%*s%s\n" % (self.help_position, "", line) - for line in help_lines[1:]]) - elif opts[-1] != "\n": - result.append("\n") - return "".join(result) - - def store_option_strings (self, parser): - self.indent() - max_len = 0 - for opt in parser.option_list: - strings = self.format_option_strings(opt) - opt.option_strings = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.indent() - for group in parser.option_groups: - for opt in 
group.option_list: - strings = self.format_option_strings(opt) - opt.option_strings = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.dedent() - self.dedent() - self.help_position = min(max_len + 2, self.max_help_position) - - def format_option_strings (self, option): - """Return a comma-separated list of option strings & metavariables.""" - if option.takes_value(): - metavar = option.metavar or option.dest.upper() - short_opts = [sopt + metavar for sopt in option._short_opts] - long_opts = [lopt + "=" + metavar for lopt in option._long_opts] - else: - short_opts = option._short_opts - long_opts = option._long_opts - - if self.short_first: - opts = short_opts + long_opts - else: - opts = long_opts + short_opts - - return ", ".join(opts) - -class IndentedHelpFormatter (HelpFormatter): - """Format help with indented section bodies. - """ - - def __init__ (self, - indent_increment=2, - max_help_position=24, - width=80, - short_first=1): - HelpFormatter.__init__( - self, indent_increment, max_help_position, width, short_first) - - def format_usage (self, usage): - return "usage: %s\n" % usage - - def format_heading (self, heading): - return "%*s%s:\n" % (self.current_indent, "", heading) - - -class TitledHelpFormatter (HelpFormatter): - """Format help with underlined section headers. - """ - - def __init__ (self, - indent_increment=0, - max_help_position=24, - width=80, - short_first=0): - HelpFormatter.__init__ ( - self, indent_increment, max_help_position, width, short_first) - - def format_usage (self, usage): - return "%s %s\n" % (self.format_heading("Usage"), usage) - - def format_heading (self, heading): - return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) -"""optik.option - -Defines the Option class and some standard value-checking functions. -""" - -# Copyright (c) 2001-2003 Gregory P. Ward. All rights reserved. -# See the README.txt distributed with Optik for licensing terms. 
- -# created 2001/10/17, GPW (from optik.py) - - -# Do the right thing with boolean values for all known Python versions. -try: - True, False -except NameError: - (True, False) = (1, 0) - -_builtin_cvt = { "int" : (int, "integer"), - "long" : (long, "long integer"), - "float" : (float, "floating-point"), - "complex" : (complex, "complex") } - -def check_builtin (option, opt, value): - (cvt, what) = _builtin_cvt[option.type] - try: - return cvt(value) - except ValueError: - raise OptionValueError( - #"%s: invalid %s argument %r" % (opt, what, value)) - "option %s: invalid %s value: %r" % (opt, what, value)) - -def check_choice(option, opt, value): - if value in option.choices: - return value - else: - choices = ", ".join(map(repr, option.choices)) - raise OptionValueError( - "option %s: invalid choice: %r (choose from %s)" - % (opt, value, choices)) - -# Not supplying a default is different from a default of None, -# so we need an explicit "not supplied" value. -NO_DEFAULT = "NO"+"DEFAULT" - - -class Option: - """ - Instance attributes: - _short_opts : [string] - _long_opts : [string] - - action : string - type : string - dest : string - default : any - nargs : int - const : any - choices : [string] - callback : function - callback_args : (any*) - callback_kwargs : { string : any } - help : string - metavar : string - """ - - # The list of instance attributes that may be set through - # keyword args to the constructor. - ATTRS = ['action', - 'type', - 'dest', - 'default', - 'nargs', - 'const', - 'choices', - 'callback', - 'callback_args', - 'callback_kwargs', - 'help', - 'metavar'] - - # The set of actions allowed by option parsers. Explicitly listed - # here so the constructor can validate its arguments. - ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "count", - "callback", - "help", - "version") - - # The set of actions that involve storing a value somewhere; - # also listed just for constructor argument validation. 
(If - # the action is one of these, there must be a destination.) - STORE_ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "count") - - # The set of actions for which it makes sense to supply a value - # type, ie. where we expect an argument to this option. - TYPED_ACTIONS = ("store", - "append", - "callback") - - # The set of known types for option parsers. Again, listed here for - # constructor argument validation. - TYPES = ("string", "int", "long", "float", "complex", "choice") - - # Dictionary of argument checking functions, which convert and - # validate option arguments according to the option type. - # - # Signature of checking functions is: - # check(option : Option, opt : string, value : string) -> any - # where - # option is the Option instance calling the checker - # opt is the actual option seen on the command-line - # (eg. "-a", "--file") - # value is the option argument seen on the command-line - # - # The return value should be in the appropriate Python type - # for option.type -- eg. an integer if option.type == "int". - # - # If no checker is defined for a type, arguments will be - # unchecked and remain strings. - TYPE_CHECKER = { "int" : check_builtin, - "long" : check_builtin, - "float" : check_builtin, - "complex" : check_builtin, - "choice" : check_choice, - } - - - # CHECK_METHODS is a list of unbound method objects; they are called - # by the constructor, in order, after all attributes are - # initialized. The list is created and filled in later, after all - # the methods are actually defined. (I just put it here because I - # like to define and document all class attributes in the same - # place.) Subclasses that add another _check_*() method should - # define their own CHECK_METHODS list that adds their check method - # to those from this class. 
- CHECK_METHODS = None - - - # -- Constructor/initialization methods ---------------------------- - - def __init__ (self, *opts, **attrs): - # Set _short_opts, _long_opts attrs from 'opts' tuple. - # Have to be set now, in case no option strings are supplied. - self._short_opts = [] - self._long_opts = [] - opts = self._check_opt_strings(opts) - self._set_opt_strings(opts) - - # Set all other attrs (action, type, etc.) from 'attrs' dict - self._set_attrs(attrs) - - # Check all the attributes we just set. There are lots of - # complicated interdependencies, but luckily they can be farmed - # out to the _check_*() methods listed in CHECK_METHODS -- which - # could be handy for subclasses! The one thing these all share - # is that they raise OptionError if they discover a problem. - for checker in self.CHECK_METHODS: - checker(self) - - def _check_opt_strings (self, opts): - # Filter out None because early versions of Optik had exactly - # one short option and one long option, either of which - # could be None. 
- opts = filter(None, opts) - if not opts: - raise TypeError("at least one option string must be supplied") - return opts - - def _set_opt_strings (self, opts): - for opt in opts: - if len(opt) < 2: - raise OptionError( - "invalid option string %r: " - "must be at least two characters long" % opt, self) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise OptionError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise OptionError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self) - self._long_opts.append(opt) - - def _set_attrs (self, attrs): - for attr in self.ATTRS: - if attrs.has_key(attr): - setattr(self, attr, attrs[attr]) - del attrs[attr] - else: - if attr == 'default': - setattr(self, attr, NO_DEFAULT) - else: - setattr(self, attr, None) - if attrs: - raise OptionError( - "invalid keyword arguments: %s" % ", ".join(attrs.keys()), - self) - - - # -- Constructor validation methods -------------------------------- - - def _check_action (self): - if self.action is None: - self.action = "store" - elif self.action not in self.ACTIONS: - raise OptionError("invalid action: %r" % self.action, self) - - def _check_type (self): - if self.type is None: - # XXX should factor out another class attr here: list of - # actions that *require* a type - if self.action in ("store", "append"): - if self.choices is not None: - # The "choices" attribute implies "choice" type. - self.type = "choice" - else: - # No type given? "string" is the most sensible default. 
- self.type = "string" - else: - if self.type not in self.TYPES: - raise OptionError("invalid option type: %r" % self.type, self) - if self.action not in self.TYPED_ACTIONS: - raise OptionError( - "must not supply a type for action %r" % self.action, self) - - def _check_choice(self): - if self.type == "choice": - if self.choices is None: - raise OptionError( - "must supply a list of choices for type 'choice'", self) - elif type(self.choices) not in (types.TupleType, types.ListType): - raise OptionError( - "choices must be a list of strings ('%s' supplied)" - % str(type(self.choices)).split("'")[1], self) - elif self.choices is not None: - raise OptionError( - "must not supply choices for type %r" % self.type, self) - - def _check_dest (self): - if self.action in self.STORE_ACTIONS and self.dest is None: - # No destination given, and we need one for this action. - # Glean a destination from the first long option string, - # or from the first short option string if no long options. - if self._long_opts: - # eg. 
"--foo-bar" -> "foo_bar" - self.dest = self._long_opts[0][2:].replace('-', '_') - else: - self.dest = self._short_opts[0][1] - - def _check_const (self): - if self.action != "store_const" and self.const is not None: - raise OptionError( - "'const' must not be supplied for action %r" % self.action, - self) - - def _check_nargs (self): - if self.action in self.TYPED_ACTIONS: - if self.nargs is None: - self.nargs = 1 - elif self.nargs is not None: - raise OptionError( - "'nargs' must not be supplied for action %r" % self.action, - self) - - def _check_callback (self): - if self.action == "callback": - if not callable(self.callback): - raise OptionError( - "callback not callable: %r" % self.callback, self) - if (self.callback_args is not None and - type(self.callback_args) is not types.TupleType): - raise OptionError( - "callback_args, if supplied, must be a tuple: not %r" - % self.callback_args, self) - if (self.callback_kwargs is not None and - type(self.callback_kwargs) is not types.DictType): - raise OptionError( - "callback_kwargs, if supplied, must be a dict: not %r" - % self.callback_kwargs, self) - else: - if self.callback is not None: - raise OptionError( - "callback supplied (%r) for non-callback option" - % self.callback, self) - if self.callback_args is not None: - raise OptionError( - "callback_args supplied for non-callback option", self) - if self.callback_kwargs is not None: - raise OptionError( - "callback_kwargs supplied for non-callback option", self) - - - CHECK_METHODS = [_check_action, - _check_type, - _check_choice, - _check_dest, - _check_const, - _check_nargs, - _check_callback] - - - # -- Miscellaneous methods ----------------------------------------- - - def __str__ (self): - return "/".join(self._short_opts + self._long_opts) - - def takes_value (self): - return self.type is not None - - - # -- Processing methods -------------------------------------------- - - def check_value (self, opt, value): - checker = self.TYPE_CHECKER.get(self.type) 
- if checker is None: - return value - else: - return checker(self, opt, value) - - def process (self, opt, value, values, parser): - - # First, convert the value(s) to the right type. Howl if any - # value(s) are bogus. - if value is not None: - if self.nargs == 1: - value = self.check_value(opt, value) - else: - value = tuple([self.check_value(opt, v) for v in value]) - - # And then take whatever action is expected of us. - # This is a separate method to make life easier for - # subclasses to add new actions. - return self.take_action( - self.action, self.dest, opt, value, values, parser) - - def take_action (self, action, dest, opt, value, values, parser): - if action == "store": - setattr(values, dest, value) - elif action == "store_const": - setattr(values, dest, self.const) - elif action == "store_true": - setattr(values, dest, True) - elif action == "store_false": - setattr(values, dest, False) - elif action == "append": - values.ensure_value(dest, []).append(value) - elif action == "count": - setattr(values, dest, values.ensure_value(dest, 0) + 1) - elif action == "callback": - args = self.callback_args or () - kwargs = self.callback_kwargs or {} - self.callback(self, opt, value, parser, *args, **kwargs) - elif action == "help": - parser.print_help() - sys.exit(0) - elif action == "version": - parser.print_version() - sys.exit(0) - else: - raise RuntimeError, "unknown action %r" % self.action - - return 1 - -# class Option -"""optik.option_parser - -Provides the OptionParser and Values classes. -""" - -# Copyright (c) 2001-2003 Gregory P. Ward. All rights reserved. -# See the README.txt distributed with Optik for licensing terms. 
- -# created 2001/10/17, GPW (from optik.py) - - - -def get_prog_name (): - return os.path.basename(sys.argv[0]) - - -SUPPRESS_HELP = "SUPPRESS"+"HELP" -SUPPRESS_USAGE = "SUPPRESS"+"USAGE" - -STD_HELP_OPTION = Option("-h", "--help", - action="help", - help="show this help message and exit") -STD_VERSION_OPTION = Option("--version", - action="version", - help="show program's version number and exit") - - -class Values: - - def __init__ (self, defaults=None): - if defaults: - for (attr, val) in defaults.items(): - setattr(self, attr, val) - - def __repr__ (self): - return ("<%s at 0x%x: %r>" - % (self.__class__.__name__, id(self), self.__dict__)) - - def _update_careful (self, dict): - """ - Update the option values from an arbitrary dictionary, but only - use keys from dict that already have a corresponding attribute - in self. Any keys in dict without a corresponding attribute - are silently ignored. - """ - for attr in dir(self): - if dict.has_key(attr): - dval = dict[attr] - if dval is not None: - setattr(self, attr, dval) - - def _update_loose (self, dict): - """ - Update the option values from an arbitrary dictionary, - using all keys from the dictionary regardless of whether - they have a corresponding attribute in self or not. - """ - self.__dict__.update(dict) - - def _update (self, dict, mode): - if mode == "careful": - self._update_careful(dict) - elif mode == "loose": - self._update_loose(dict) - else: - raise ValueError, "invalid update mode: %r" % mode - - def read_module (self, modname, mode="careful"): - __import__(modname) - mod = sys.modules[modname] - self._update(vars(mod), mode) - - def read_file (self, filename, mode="careful"): - vars = {} - execfile(filename, vars) - self._update(vars, mode) - - def ensure_value (self, attr, value): - if not hasattr(self, attr) or getattr(self, attr) is None: - setattr(self, attr, value) - return getattr(self, attr) - - -class OptionContainer: - - """ - Abstract base class. 
- - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - option_list : [Option] - the list of Option objects contained by this OptionContainer - _short_opt : { string : Option } - dictionary mapping short option strings, eg. "-f" or "-X", - to the Option instances that implement them. If an Option - has multiple short option strings, it will appears in this - dictionary multiple times. [1] - _long_opt : { string : Option } - dictionary mapping long option strings, eg. "--file" or - "--exclude", to the Option instances that implement them. - Again, a given Option can occur multiple times in this - dictionary. [1] - defaults : { string : any } - dictionary mapping option destination names to default - values for each destination [1] - - [1] These mappings are common to (shared by) all components of the - controlling OptionParser, where they are initially created. - - """ - - def __init__ (self, option_class, conflict_handler, description): - # Initialize the option list and related data structures. - # This method must be provided by subclasses, and it must - # initialize at least the following instance attributes: - # option_list, _short_opt, _long_opt, defaults. - self._create_option_list() - - self.option_class = option_class - self.set_conflict_handler(conflict_handler) - self.set_description(description) - - def _create_option_mappings (self): - # For use by OptionParser constructor -- create the master - # option mappings used by this OptionParser and all - # OptionGroups that it owns. 
- self._short_opt = {} # single letter -> Option instance - self._long_opt = {} # long option -> Option instance - self.defaults = {} # maps option dest -> default value - - - def _share_option_mappings (self, parser): - # For use by OptionGroup constructor -- use shared option - # mappings from the OptionParser that owns this OptionGroup. - self._short_opt = parser._short_opt - self._long_opt = parser._long_opt - self.defaults = parser.defaults - - def set_conflict_handler (self, handler): - if handler not in ("ignore", "error", "resolve"): - raise ValueError, "invalid conflict_resolution value %r" % handler - self.conflict_handler = handler - - def set_description (self, description): - self.description = description - - - # -- Option-adding methods ----------------------------------------- - - def _check_conflict (self, option): - conflict_opts = [] - for opt in option._short_opts: - if self._short_opt.has_key(opt): - conflict_opts.append((opt, self._short_opt[opt])) - for opt in option._long_opts: - if self._long_opt.has_key(opt): - conflict_opts.append((opt, self._long_opt[opt])) - - if conflict_opts: - handler = self.conflict_handler - if handler == "ignore": # behaviour for Optik 1.0, 1.1 - pass - elif handler == "error": # new in 1.2 - raise OptionConflictError( - "conflicting option string(s): %s" - % ", ".join([co[0] for co in conflict_opts]), - option) - elif handler == "resolve": # new in 1.2 - for (opt, c_option) in conflict_opts: - if opt.startswith("--"): - c_option._long_opts.remove(opt) - del self._long_opt[opt] - else: - c_option._short_opts.remove(opt) - del self._short_opt[opt] - if not (c_option._short_opts or c_option._long_opts): - c_option.container.option_list.remove(c_option) - - def add_option (self, *args, **kwargs): - """add_option(Option) - add_option(opt_str, ..., kwarg=val, ...) 
- """ - if type(args[0]) is types.StringType: - option = self.option_class(*args, **kwargs) - elif len(args) == 1 and not kwargs: - option = args[0] - if not isinstance(option, Option): - raise TypeError, "not an Option instance: %r" % option - else: - raise TypeError, "invalid arguments" - - self._check_conflict(option) - - self.option_list.append(option) - option.container = self - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - if option.dest is not None: # option has a dest, we need a default - if option.default is not NO_DEFAULT: - self.defaults[option.dest] = option.default - elif not self.defaults.has_key(option.dest): - self.defaults[option.dest] = None - - return option - - def add_options (self, option_list): - for option in option_list: - self.add_option(option) - - # -- Option query/removal methods ---------------------------------- - - def get_option (self, opt_str): - return (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - - def has_option (self, opt_str): - return (self._short_opt.has_key(opt_str) or - self._long_opt.has_key(opt_str)) - - def remove_option (self, opt_str): - option = self._short_opt.get(opt_str) - if option is None: - option = self._long_opt.get(opt_str) - if option is None: - raise ValueError("no such option %r" % opt_str) - - for opt in option._short_opts: - del self._short_opt[opt] - for opt in option._long_opts: - del self._long_opt[opt] - option.container.option_list.remove(option) - - - # -- Help-formatting methods --------------------------------------- - - def format_option_help (self, formatter): - if not self.option_list: - return "" - result = [] - for option in self.option_list: - if not option.help is SUPPRESS_HELP: - result.append(formatter.format_option(option)) - return "".join(result) - - def format_description (self, formatter): - if self.description: - return formatter.format_description(self.description) - else: - 
return "" - - def format_help (self, formatter): - if self.description: - desc = self.format_description(formatter) + "\n" - else: - desc = "" - return desc + self.format_option_help(formatter) - - -class OptionGroup (OptionContainer): - - def __init__ (self, parser, title, description=None): - self.parser = parser - OptionContainer.__init__( - self, parser.option_class, parser.conflict_handler, description) - self.title = title - - def _create_option_list (self): - self.option_list = [] - self._share_option_mappings(self.parser) - - def set_title (self, title): - self.title = title - - # -- Help-formatting methods --------------------------------------- - - def format_help (self, formatter): - result = formatter.format_heading(self.title) - formatter.indent() - result += OptionContainer.format_help(self, formatter) - formatter.dedent() - return result - - -class OptionParser (OptionContainer): - - """ - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - usage : string - a usage string for your program. Before it is displayed - to the user, "%prog" will be expanded to the name of - your program (self.prog or os.path.basename(sys.argv[0])). - prog : string - the name of the current program (to override - os.path.basename(sys.argv[0])). - - allow_interspersed_args : boolean = true - if true, positional arguments may be interspersed with options. - Assuming -a and -b each take a single argument, the command-line - -ablah foo bar -bboo baz - will be interpreted the same as - -ablah -bboo -- foo bar baz - If this flag were false, that command line would be interpreted as - -ablah -- foo bar -bboo baz - -- ie. we stop processing options as soon as we see the first - non-option argument. 
(This is the tradition followed by - Python's getopt module, Perl's Getopt::Std, and other argument- - parsing libraries, but it is generally annoying to users.) - - rargs : [string] - the argument list currently being parsed. Only set when - parse_args() is active, and continually trimmed down as - we consume arguments. Mainly there for the benefit of - callback options. - largs : [string] - the list of leftover arguments that we have skipped while - parsing options. If allow_interspersed_args is false, this - list is always empty. - values : Values - the set of option values currently being accumulated. Only - set when parse_args() is active. Also mainly for callbacks. - - Because of the 'rargs', 'largs', and 'values' attributes, - OptionParser is not thread-safe. If, for some perverse reason, you - need to parse command-line arguments simultaneously in different - threads, use different OptionParser instances. - - """ - - standard_option_list = [] - - def __init__ (self, - usage=None, - option_list=None, - option_class=Option, - version=None, - conflict_handler="error", - description=None, - formatter=None, - add_help_option=1, - prog=None): - OptionContainer.__init__( - self, option_class, conflict_handler, description) - self.set_usage(usage) - self.prog = prog - self.version = version - self.allow_interspersed_args = 1 - if formatter is None: - formatter = IndentedHelpFormatter() - self.formatter = formatter - - # Populate the option list; initial sources are the - # standard_option_list class attribute, the 'option_list' - # argument, and the STD_VERSION_OPTION (if 'version' supplied) - # and STD_HELP_OPTION globals. 
- self._populate_option_list(option_list, - add_help=add_help_option) - - self._init_parsing_state() - - # -- Private methods ----------------------------------------------- - # (used by our or OptionContainer's constructor) - - def _create_option_list (self): - self.option_list = [] - self.option_groups = [] - self._create_option_mappings() - - def _populate_option_list (self, option_list, add_help=1): - if self.standard_option_list: - self.add_options(self.standard_option_list) - if option_list: - self.add_options(option_list) - if self.version: - self.add_option(STD_VERSION_OPTION) - if add_help: - self.add_option(STD_HELP_OPTION) - - def _init_parsing_state (self): - # These are set in parse_args() for the convenience of callbacks. - self.rargs = None - self.largs = None - self.values = None - - - # -- Simple modifier methods --------------------------------------- - - def set_usage (self, usage): - if usage is None: - self.usage = "%prog [options]" - elif usage is SUPPRESS_USAGE: - self.usage = None - elif usage.startswith("usage: "): - # for backwards compatibility with Optik 1.3 and earlier - self.usage = usage[7:] - else: - self.usage = usage - - def enable_interspersed_args (self): - self.allow_interspersed_args = 1 - - def disable_interspersed_args (self): - self.allow_interspersed_args = 0 - - def set_default (self, dest, value): - self.defaults[dest] = value - - def set_defaults (self, **kwargs): - self.defaults.update(kwargs) - - def get_default_values (self): - return Values(self.defaults) - - - # -- OptionGroup methods ------------------------------------------- - - def add_option_group (self, *args, **kwargs): - # XXX lots of overlap with OptionContainer.add_option() - if type(args[0]) is types.StringType: - group = OptionGroup(self, *args, **kwargs) - elif len(args) == 1 and not kwargs: - group = args[0] - if not isinstance(group, OptionGroup): - raise TypeError, "not an OptionGroup instance: %r" % group - if group.parser is not self: - raise 
ValueError, "invalid OptionGroup (wrong parser)" - else: - raise TypeError, "invalid arguments" - - self.option_groups.append(group) - return group - - def get_option_group (self, opt_str): - option = (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - if option and option.container is not self: - return option.container - return None - - - # -- Option-parsing methods ---------------------------------------- - - def _get_args (self, args): - if args is None: - return sys.argv[1:] - else: - return args[:] # don't modify caller's list - - def parse_args (self, args=None, values=None): - """ - parse_args(args : [string] = sys.argv[1:], - values : Values = None) - -> (values : Values, args : [string]) - - Parse the command-line options found in 'args' (default: - sys.argv[1:]). Any errors result in a call to 'error()', which - by default prints the usage message to stderr and calls - sys.exit() with an error message. On success returns a pair - (values, args) where 'values' is an Values instance (with all - your option values) and 'args' is the list of arguments left - over after parsing options. - """ - rargs = self._get_args(args) - if values is None: - values = self.get_default_values() - - # Store the halves of the argument list as attributes for the - # convenience of callbacks: - # rargs - # the rest of the command-line (the "r" stands for - # "remaining" or "right-hand") - # largs - # the leftover arguments -- ie. 
what's left after removing - # options and their arguments (the "l" stands for "leftover" - # or "left-hand") - self.rargs = rargs - self.largs = largs = [] - self.values = values - - try: - stop = self._process_args(largs, rargs, values) - except (BadOptionError, OptionValueError), err: - self.error(err.msg) - - args = largs + rargs - return self.check_values(values, args) - - def check_values (self, values, args): - """ - check_values(values : Values, args : [string]) - -> (values : Values, args : [string]) - - Check that the supplied option values and leftover arguments are - valid. Returns the option values and leftover arguments - (possibly adjusted, possibly completely new -- whatever you - like). Default implementation just returns the passed-in - values; subclasses may override as desired. - """ - return (values, args) - - def _process_args (self, largs, rargs, values): - """_process_args(largs : [string], - rargs : [string], - values : Values) - - Process command-line arguments and populate 'values', consuming - options and arguments from 'rargs'. If 'allow_interspersed_args' is - false, stop at the first non-option argument. If true, accumulate any - interspersed non-option arguments in 'largs'. - """ - while rargs: - arg = rargs[0] - # We handle bare "--" explicitly, and bare "-" is handled by the - # standard arg handler since the short arg case ensures that the - # len of the opt string is greater than 1. 
- if arg == "--": - del rargs[0] - return - elif arg[0:2] == "--": - # process a single long option (possibly with value(s)) - self._process_long_opt(rargs, values) - elif arg[:1] == "-" and len(arg) > 1: - # process a cluster of short options (possibly with - # value(s) for the last one only) - self._process_short_opts(rargs, values) - elif self.allow_interspersed_args: - largs.append(arg) - del rargs[0] - else: - return # stop now, leave this arg in rargs - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt (self, opt): - """_match_long_opt(opt : string) -> string - - Determine which long option string 'opt' matches, ie. which one - it is an unambiguous abbrevation for. Raises BadOptionError if - 'opt' doesn't unambiguously match any long option string. - """ - return _match_abbrev(opt, self._long_opt) - - def _process_long_opt (self, rargs, values): - arg = rargs.pop(0) - - # Value explicitly attached to arg? Pretend it's the next - # argument. 
- if "=" in arg: - (opt, next_arg) = arg.split("=", 1) - rargs.insert(0, next_arg) - had_explicit_value = 1 - else: - opt = arg - had_explicit_value = 0 - - opt = self._match_long_opt(opt) - option = self._long_opt[opt] - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error("%s option requires a value" % opt) - else: - self.error("%s option requires %d values" - % (opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - elif had_explicit_value: - self.error("%s option does not take a value" % opt) - - else: - value = None - - option.process(opt, value, values, self) - - def _process_short_opts (self, rargs, values): - arg = rargs.pop(0) - stop = 0 - i = 1 - for ch in arg[1:]: - opt = "-" + ch - option = self._short_opt.get(opt) - i += 1 # we have consumed a character - - if not option: - self.error("no such option: %s" % opt) - if option.takes_value(): - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. - if i < len(arg): - rargs.insert(0, arg[i:]) - stop = 1 - - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error("%s option requires a value" % opt) - else: - self.error("%s option requires %s values" - % (opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - else: # option doesn't take a value - value = None - - option.process(opt, value, values, self) - - if stop: - break - - - # -- Feedback methods ---------------------------------------------- - - def error (self, msg): - """error(msg : string) - - Print a usage message incorporating 'msg' to stderr and exit. - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. 
- """ - self.print_usage(sys.stderr) - sys.exit("%s: error: %s" % (get_prog_name(), msg)) - - def get_usage (self): - if self.usage: - return self.formatter.format_usage( - self.usage.replace("%prog", get_prog_name())) - else: - return "" - - def print_usage (self, file=None): - """print_usage(file : file = stdout) - - Print the usage message for the current program (self.usage) to - 'file' (default stdout). Any occurence of the string "%prog" in - self.usage is replaced with the name of the current program - (basename of sys.argv[0]). Does nothing if self.usage is empty - or not defined. - """ - if self.usage: - print >>file, self.get_usage() - - def get_version (self): - if self.version: - return self.version.replace("%prog", get_prog_name()) - else: - return "" - - def print_version (self, file=None): - """print_version(file : file = stdout) - - Print the version message for this program (self.version) to - 'file' (default stdout). As with print_usage(), any occurence - of "%prog" in self.version is replaced by the current program's - name. Does nothing if self.version is empty or undefined. 
- """ - if self.version: - print >>file, self.get_version() - - def format_option_help (self, formatter=None): - if formatter is None: - formatter = self.formatter - formatter.store_option_strings(self) - result = [] - result.append(formatter.format_heading("options")) - formatter.indent() - if self.option_list: - result.append(OptionContainer.format_option_help(self, formatter)) - result.append("\n") - for group in self.option_groups: - result.append(group.format_help(formatter)) - result.append("\n") - formatter.dedent() - # Drop the last "\n", or the header if no options or option groups: - return "".join(result[:-1]) - - def format_help (self, formatter=None): - if formatter is None: - formatter = self.formatter - result = [] - if self.usage: - result.append(self.get_usage() + "\n") - if self.description: - result.append(self.format_description(formatter) + "\n") - result.append(self.format_option_help(formatter)) - return "".join(result) - - def print_help (self, file=None): - """print_help(file : file = stdout) - - Print an extended help message, listing all options and any - help text provided with them, to 'file' (default stdout). - """ - if file is None: - file = sys.stdout - file.write(self.format_help()) - -# class OptionParser - - -def _match_abbrev (s, wordmap): - """_match_abbrev(s : string, wordmap : {string : Option}) -> string - - Return the string key in 'wordmap' for which 's' is an unambiguous - abbreviation. If 's' is found to be ambiguous or doesn't match any of - 'words', raise BadOptionError. - """ - # Is there an exact match? - if wordmap.has_key(s): - return s - else: - # Isolate all words with s as a prefix. - possibilities = [word for word in wordmap.keys() - if word.startswith(s)] - # No exact match, so there had better be just one possibility. - if len(possibilities) == 1: - return possibilities[0] - elif not possibilities: - raise BadOptionError("no such option: %s" % s) - else: - # More than one possible completion: ambiguous prefix. 
- raise BadOptionError("ambiguous option: %s (%s?)" - % (s, ", ".join(possibilities))) -"""Text wrapping and filling. -""" - -# Copyright (C) 1999-2001 Gregory P. Ward. -# Copyright (C) 2002, 2003 Python Software Foundation. -# Written by Greg Ward <gward@python.net> - - -# Do the right thing with boolean values for all known Python versions. -try: - True, False -except NameError: - (True, False) = (1, 0) - -class TextWrapper: - """ - Object for wrapping/filling text. The public interface consists of - the wrap() and fill() methods; the other methods are just there for - subclasses to override in order to tweak the default behaviour. - If you want to completely replace the main wrapping algorithm, - you'll probably have to override _wrap_chunks(). - - Several instance attributes control various aspects of wrapping: - width (default: 70) - the maximum width of wrapped lines (unless break_long_words - is false) - initial_indent (default: "") - string that will be prepended to the first line of wrapped - output. Counts towards the line's width. - subsequent_indent (default: "") - string that will be prepended to all lines save the first - of wrapped output; also counts towards each line's width. - expand_tabs (default: true) - Expand tabs in input text to spaces before further processing. - Each tab will become 1 .. 8 spaces, depending on its position in - its line. If false, each tab is treated as a single character. - replace_whitespace (default: true) - Replace all whitespace characters in the input text by spaces - after tab expansion. Note that if expand_tabs is false and - replace_whitespace is true, every tab will be converted to a - single space! - fix_sentence_endings (default: false) - Ensure that sentence-ending punctuation is always followed - by two spaces. Off by default becaus the algorithm is - (unavoidably) imperfect. - break_long_words (default: true) - Break words longer than 'width'. 
If false, those words will not - be broken, and some lines might be longer than 'width'. - """ - - whitespace_trans = string.maketrans(string.whitespace, - ' ' * len(string.whitespace)) - - # This funky little regex is just the trick for splitting - # text up into word-wrappable chunks. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! - # (after stripping out empty strings). - wordsep_re = re.compile(r'(\s+|' # any whitespace - r'-*\w{2,}-(?=\w{2,})|' # hyphenated words - r'(?<=\S)-{2,}(?=\w))') # em-dash - - # XXX will there be a locale-or-charset-aware version of - # string.lowercase in 2.3? - sentence_end_re = re.compile(r'[%s]' # lowercase letter - r'[\.\!\?]' # sentence-ending punct. - r'[\"\']?' # optional end-of-quote - % string.lowercase) - - - def __init__ (self, - width=70, - initial_indent="", - subsequent_indent="", - expand_tabs=True, - replace_whitespace=True, - fix_sentence_endings=False, - break_long_words=True): - self.width = width - self.initial_indent = initial_indent - self.subsequent_indent = subsequent_indent - self.expand_tabs = expand_tabs - self.replace_whitespace = replace_whitespace - self.fix_sentence_endings = fix_sentence_endings - self.break_long_words = break_long_words - - - # -- Private methods ----------------------------------------------- - # (possibly useful for subclasses to override) - - def _munge_whitespace(self, text): - """_munge_whitespace(text : string) -> string - - Munge whitespace in text: expand tabs and convert all other - whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" - becomes " foo bar baz". - """ - if self.expand_tabs: - text = text.expandtabs() - if self.replace_whitespace: - text = text.translate(self.whitespace_trans) - return text - - - def _split(self, text): - """_split(text : string) -> [string] - - Split the text to wrap into indivisible chunks. 
Chunks are - not quite the same as words; see wrap_chunks() for full - details. As an example, the text - Look, goof-ball -- use the -b option! - breaks into the following chunks: - 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', 'option!' - """ - chunks = self.wordsep_re.split(text) - chunks = filter(None, chunks) - return chunks - - def _fix_sentence_endings(self, chunks): - """_fix_sentence_endings(chunks : [string]) - - Correct for sentence endings buried in 'chunks'. Eg. when the - original text contains "... foo.\nBar ...", munge_whitespace() - and split() will convert that to [..., "foo.", " ", "Bar", ...] - which has one too few spaces; this method simply changes the one - space to two. - """ - i = 0 - pat = self.sentence_end_re - while i < len(chunks)-1: - if chunks[i+1] == " " and pat.search(chunks[i]): - chunks[i+1] = " " - i += 2 - else: - i += 1 - - def _handle_long_word(self, chunks, cur_line, cur_len, width): - """_handle_long_word(chunks : [string], - cur_line : [string], - cur_len : int, width : int) - - Handle a chunk of text (most likely a word, not whitespace) that - is too long to fit in any line. - """ - space_left = width - cur_len - - # If we're allowed to break long words, then do so: put as much - # of the next chunk onto the current line as will fit. - if self.break_long_words: - cur_line.append(chunks[0][0:space_left]) - chunks[0] = chunks[0][space_left:] - - # Otherwise, we have to preserve the long word intact. Only add - # it to the current line if there's nothing already there -- - # that minimizes how much we violate the width constraint. - elif not cur_line: - cur_line.append(chunks.pop(0)) - - # If we're not allowed to break long words, and there's already - # text on the current line, do nothing. 
Next time through the - # main loop of _wrap_chunks(), we'll wind up here again, but - # cur_len will be zero, so the next line will be entirely - # devoted to the long word that we can't handle right now. - - def _wrap_chunks(self, chunks): - """_wrap_chunks(chunks : [string]) -> [string] - - Wrap a sequence of text chunks and return a list of lines of - length 'self.width' or less. (If 'break_long_words' is false, - some lines may be longer than this.) Chunks correspond roughly - to words and the whitespace between them: each chunk is - indivisible (modulo 'break_long_words'), but a line break can - come between any two chunks. Chunks should not have internal - whitespace; ie. a chunk is either all whitespace or a "word". - Whitespace chunks will be removed from the beginning and end of - lines, but apart from that whitespace is preserved. - """ - lines = [] - - while chunks: - - # Start the list of chunks that will make up the current line. - # cur_len is just the length of all the chunks in cur_line. - cur_line = [] - cur_len = 0 - - # Figure out which static string will prefix this line. - if lines: - indent = self.subsequent_indent - else: - indent = self.initial_indent - - # Maximum width for this line. - width = self.width - len(indent) - - # First chunk on line is whitespace -- drop it. - if chunks[0].strip() == '': - del chunks[0] - - while chunks: - l = len(chunks[0]) - - # Can at least squeeze this chunk onto the current line. - if cur_len + l <= width: - cur_line.append(chunks.pop(0)) - cur_len += l - - # Nope, this line is full. - else: - break - - # The current line is full, and the next chunk is too big to - # fit on *any* line (not just this one). - if chunks and len(chunks[0]) > width: - self._handle_long_word(chunks, cur_line, cur_len, width) - - # If the last chunk on this line is all whitespace, drop it. 
- if cur_line and cur_line[-1].strip() == '': - del cur_line[-1] - - # Convert current line back to a string and store it in list - # of all lines (return value). - if cur_line: - lines.append(indent + ''.join(cur_line)) - - return lines - - - # -- Public interface ---------------------------------------------- - - def wrap(self, text): - """wrap(text : string) -> [string] - - Reformat the single paragraph in 'text' so it fits in lines of - no more than 'self.width' columns, and return a list of wrapped - lines. Tabs in 'text' are expanded with string.expandtabs(), - and all other whitespace characters (including newline) are - converted to space. - """ - text = self._munge_whitespace(text) - indent = self.initial_indent - if len(text) + len(indent) <= self.width: - return [indent + text] - chunks = self._split(text) - if self.fix_sentence_endings: - self._fix_sentence_endings(chunks) - return self._wrap_chunks(chunks) - - def fill(self, text): - """fill(text : string) -> string - - Reformat the single paragraph in 'text' to fit in lines of no - more than 'self.width' columns, and return a new string - containing the entire wrapped paragraph. - """ - return "\n".join(self.wrap(text)) - - -# -- Convenience interface --------------------------------------------- - -def wrap(text, width=70, **kwargs): - """Wrap a single paragraph of text, returning a list of wrapped lines. - - Reformat the single paragraph in 'text' so it fits in lines of no - more than 'width' columns, and return a list of wrapped lines. By - default, tabs in 'text' are expanded with string.expandtabs(), and - all other whitespace characters (including newline) are converted to - space. See TextWrapper class for available keyword args to customize - wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.wrap(text) - -def fill(text, width=70, **kwargs): - """Fill a single paragraph of text, returning a new string. 
- - Reformat the single paragraph in 'text' to fit in lines of no more - than 'width' columns, and return a new string containing the entire - wrapped paragraph. As with wrap(), tabs are expanded and other - whitespace characters converted to space. See TextWrapper class for - available keyword args to customize wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.fill(text) - -# Copyright (c) 2001-2003 Gregory P. Ward. All rights reserved. -# See the README.txt distributed with Optik for licensing terms. - -__version__ = "1.4.1" - - - - -# Some day, there might be many Option classes. As of Optik 1.3, the -# preferred way to instantiate Options is indirectly, via make_option(), -# which will become a factory function when there are many Option -# classes. -make_option = Option diff --git a/pypy/tool/option.py b/pypy/tool/option.py deleted file mode 100644 index c65e5f17de..0000000000 --- a/pypy/tool/option.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -from pypy.tool import optik -make_option = optik.make_option - -class Options: - verbose = 0 - showwarning = 0 - spaces = [] - -def run_tb_server(option, opt, value, parser): - from pypy.tool import tb_server - tb_server.start() - -def get_standard_options(): - options = [] - - def objspace_callback(option, opt, value, parser, space): - parser.values.spaces.append(space) - - options.append(make_option( - '-S', action="callback", - callback=objspace_callback, callback_args=("std",), - help="run in std object space")) - options.append(make_option( - '-T', action="callback", - callback=objspace_callback, callback_args=("trivial",), - help="run in trivial object space")) - """ - unneeded options that don't even make sense any more? 
- options.append(make_option( - '-P', action="callback", - callback=objspace_callback, callback_args=("trace",), - help="run in trace object space")) - options.append(make_option( - '-A', action="callback", - callback=objspace_callback, callback_args=("ann",), - help="run in annotation object space")) - """ - options.append(make_option( - '-v', action="count", dest="verbose", - help="verbose")) - options.append(make_option( - '-w', action="store_true", dest="showwarning", - help="enable warnings (disabled by default)")) - options.append(make_option( - '-H', action="callback", - callback=run_tb_server, - help="use web browser for traceback info")) - - - return options - -def process_options(optionlist, input_options, argv=None): - global Options - Options = input_options - op = optik.OptionParser() - op.add_options(optionlist) - options, args = op.parse_args(argv, input_options) - return args - -def objspace(name='', _spacecache={}): - """ return singleton ObjSpace instance. - - this is configured via the environment variable OBJSPACE - """ - - if not name: - if Options.spaces: - name = Options.spaces[-1] - else: - name = os.environ.get('OBJSPACE', 'std') - - try: - return _spacecache[name] - except KeyError: - module = __import__("pypy.objspace.%s" % name, None, None, ["Space"]) - Space = module.Space - return _spacecache.setdefault(name, Space()) diff --git a/pypy/tool/ppdb.py b/pypy/tool/ppdb.py deleted file mode 100644 index 5af55f1590..0000000000 --- a/pypy/tool/ppdb.py +++ /dev/null @@ -1,22 +0,0 @@ -# it had to happen: a customized version of pdb for pypy; Py Py -# DeBugger, if you please. - -# i only plan to support post morterm debugging! my head hurts if my -# thoughts even go near any alternative! - -import pdb, sys - -class PPdb(pdb.Pdb): - def do_bta(self, line): - self.operr.print_application_traceback(self.space, sys.stdout) - -def post_mortem(space, t, operr): - # need app-level tb too? 
- p = PPdb() - p.reset() - p.space = space - p.operr = operr - while t.tb_next is not None: - t = t.tb_next - p.interaction(t.tb_frame, t) - diff --git a/pypy/tool/pydis.py b/pypy/tool/pydis.py deleted file mode 100644 index 70f36b470c..0000000000 --- a/pypy/tool/pydis.py +++ /dev/null @@ -1,184 +0,0 @@ -"""disassembler of Python byte code into mnemonics. - -XXX this only works for python-2.3 because of the linenumber - optimization - -""" - -import autopath -import sys - -from pypy.tool.opcode import * -from pypy.tool.opcode import __all__ as _opcodes_all - -__all__ = ["dis","pydisassemble","distb","disco"] + _opcodes_all -del _opcodes_all - -class Bytecode: - def __init__(self, disresult, bytecodeindex, oparg, lineno): - self.disresult = disresult - self.index = bytecodeindex - self.op = ord(disresult.code.co_code[self.index]) - self.name = opname[self.op] - self.oparg = oparg - self.lineno = lineno - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.index == other.index and - self.op == other.op and - self.name == other.name and - self.oparg == other.oparg and - self.lineno == other.lineno) - - def __ne__(self, other): - return not (self == other) - - def reprargstring(self): - """ return a string representation of any arguments. 
(empty for no args)""" - oparg = self.oparg - if oparg is None: - return '' - co = self.disresult.code - op = self.op - - s = repr(oparg).rjust(5) + " " - if op in hasconst: - s += '(' + `co.co_consts[oparg]` + ')' - elif op in hasname: - s += '(' + co.co_names[oparg] + ')' - elif op in hasjrel: - s += '(to ' + repr(self.index + oparg) + ')' - elif op in haslocal: - s += '(' + co.co_varnames[oparg] + ')' - elif op in hascompare: - s += '(' + cmp_op[oparg] + ')' - elif op in hasfree: - #if free is None: - free = co.co_cellvars + co.co_freevars - s += '(' + free[oparg] + ')' - return s - - def __repr__(self): - return self.name + self.reprargstring() - -class DisResult: - """ an instance of this class gets returned for disassembling - objects/functions/code objects whatever. - """ - def __init__(self, code): - self.code = code - self.bytecodes = [] - - def append(self, bytecodeindex, oparg, lineno): - """ append bytecode anaylsis information ...""" - bc = Bytecode(self, bytecodeindex, oparg, lineno) - self.bytecodes.append(bc) - - def getbytecode(self, index): - """ return bytecode instance matching the given index. """ - for bytecode in self.bytecodes: - if bytecode.index == index: - return bytecode - raise ValueError, "no bytecode found on index %s in code \n%s" % ( - index, pydis(self.code)) - - def format(self): - lastlineno = -1 - labels = findlabels(self.code.co_code) - lines = [] - for bc in self.bytecodes: - l = [] - if bc.lineno != lastlineno: - lastlineno = bc.lineno - l.append("%3d" % bc.lineno) - else: - l.append(" ") - l.append(bc.index in labels and ">>" or " ") - l.append(repr(bc.index).rjust(4)) - l.append(bc.name.ljust(20)) - l.append(bc.reprargstring()) - lines.append(" ".join(l)) - return "\n".join(lines) - - __repr__ = format - -def pydis(co): - """return result of dissassembling a code object. 
""" - - if hasattr(co, 'func_code'): - co = co.func_code - - disresult = DisResult(co) - code = co.co_code - - byte_increments = [ord(c) for c in co.co_lnotab[0::2]] - line_increments = [ord(c) for c in co.co_lnotab[1::2]] - table_length = len(byte_increments) # == len(line_increments) - - lineno = co.co_firstlineno - table_index = 0 - while (table_index < table_length - and byte_increments[table_index] == 0): - lineno += line_increments[table_index] - table_index += 1 - addr = 0 - line_incr = 0 - - n = len(code) - i = 0 - extended_arg = 0 - free = None - while i < n: - c = code[i] - op = ord(c) - - if i >= addr: - lineno += line_incr - while table_index < table_length: - addr += byte_increments[table_index] - line_incr = line_increments[table_index] - table_index += 1 - if line_incr: - break - else: - addr = sys.maxint - current_bytecodeindex = i - i = i+1 - oparg = None - if op >= HAVE_ARGUMENT: - oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg - extended_arg = 0 - i = i+2 - if op == EXTENDED_ARG: - extended_arg = oparg*65536L - - disresult.append(current_bytecodeindex, oparg, lineno) - assert disresult is not None - return disresult - -def findlabels(code): - """Detect all offsets in a byte code which are jump targets. - - Return the list of offsets. 
- - """ - labels = [] - n = len(code) - i = 0 - while i < n: - c = code[i] - op = ord(c) - i = i+1 - if op >= HAVE_ARGUMENT: - oparg = ord(code[i]) + ord(code[i+1])*256 - i = i+2 - label = -1 - if op in hasjrel: - label = i+oparg - elif op in hasjabs: - label = oparg - if label >= 0: - if label not in labels: - labels.append(label) - return labels diff --git a/pypy/tool/pyrexc b/pypy/tool/pyrexc deleted file mode 100755 index 281df84e0d..0000000000 --- a/pypy/tool/pyrexc +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -# -# Pyrex -- Main Program, Unix -# - -import autopath -from Pyrex.Compiler.Main import main -main(command_line = 1) diff --git a/pypy/tool/pytestsupport.py b/pypy/tool/pytestsupport.py deleted file mode 100644 index 0070b05a6b..0000000000 --- a/pypy/tool/pytestsupport.py +++ /dev/null @@ -1,174 +0,0 @@ -from __future__ import generators -import autopath -import py -from py.__impl__.magic import exprinfo -from pypy.interpreter.gateway import interp2app_temp -from pypy.interpreter.error import OperationError - -# ____________________________________________________________ - -class AppFrame(py.code.Frame): - - def __init__(self, pyframe): - self.code = py.code.Code(pyframe.code) - self.lineno = pyframe.get_last_lineno() - 1 - self.space = pyframe.space - self.w_globals = pyframe.w_globals - self.w_locals = pyframe.getdictscope() - self.f_locals = self.w_locals # for py.test's recursion detection - - def eval(self, code, **vars): - space = self.space - for key, w_value in vars.items(): - space.setitem(self.w_locals, space.wrap(key), w_value) - return space.eval(code, self.w_globals, self.w_locals) - - def exec_(self, code, **vars): - space = self.space - for key, w_value in vars.items(): - space.setitem(self.w_locals, space.wrap(key), w_value) - space.exec_(code, self.w_globals, self.w_locals) - - def repr(self, w_value): - return self.space.unwrap(self.space.repr(w_value)) - - def is_true(self, w_value): - return self.space.is_true(w_value) - 
-class AppExceptionInfo(py.code.ExceptionInfo): - """An ExceptionInfo object representing an app-level exception.""" - - def __init__(self, space, operr): - self.space = space - self.operr = operr - self.traceback = AppTraceback(self.operr.application_traceback) - - def __str__(self): - return '[app-level] ' + self.operr.errorstr(self.space) - -class AppTracebackEntry(py.code.Traceback.Entry): - exprinfo = None - - def __init__(self, tb): - self.frame = AppFrame(tb.frame) - self.lineno = tb.lineno - 1 - - def reinterpret(self): - # XXX we need to solve a general problem: how to prevent - # reinterpretation from generating a different exception? - # This problem includes the fact that exprinfo will generate - # its own long message that looks like - # OperationError: << [<W_TypeObject(NameError)>: W_StringObj... - # which is much less nice than the one produced by str(self). - # XXX this reinterpret() is only here to prevent reinterpretation. - return self.exprinfo - -class AppTraceback(py.code.Traceback): - Entry = AppTracebackEntry - - def __init__(self, apptb): - l = [] - while apptb is not None: - l.append(self.Entry(apptb)) - apptb = apptb.next - list.__init__(self, l) - -# ____________________________________________________________ - -def build_pytest_assertion(space): - def my_init(space, w_self, __args__): - "Our new AssertionError.__init__()." - w_parent_init = space.getattr(w_BuiltinAssertionError, - space.wrap('__init__')) - space.call_args(w_parent_init, __args__.prepend(w_self)) - framestack = space.getexecutioncontext().framestack - frame = framestack.top(0) - # Argh! we may see app-level helpers in the frame stack! - # that's very probably very bad... 
- if frame.code.co_name == 'app_normalize_exception': - frame = framestack.top(1) - - runner = AppFrame(frame) - try: - source = runner.statement - source = str(source).strip() - except py.error.ENOENT: - source = None - if source and not py.test.config.option.nomagic: - msg = exprinfo.interpret(source, runner, should_fail=True) - space.setattr(w_self, space.wrap('args'), - space.newtuple([space.wrap(msg)])) - w_msg = space.wrap(msg) - else: - w_msg = space.w_None - space.setattr(w_self, space.wrap('msg'), w_msg) - - # build a new AssertionError class to replace the original one. - w_BuiltinAssertionError = space.getitem(space.w_builtins, - space.wrap('AssertionError')) - w_metaclass = space.type(w_BuiltinAssertionError) - w_init = space.wrap(interp2app_temp(my_init)) - w_dict = space.newdict([]) - space.setitem(w_dict, space.wrap('__init__'), w_init) - return space.call_function(w_metaclass, - space.wrap('AssertionError'), - space.newtuple([w_BuiltinAssertionError]), - w_dict) - -def pypyraises(space, w_ExpectedException, w_expr, __args__): - """A built-in function providing the equivalent of py.test.raises().""" - args_w, kwds_w = __args__.unpack() - if space.is_true(space.isinstance(w_expr, space.w_str)): - if args_w: - raise OperationError(space.w_TypeError, - space.wrap("raises() takes no argument " - "after a string expression")) - expr = space.unwrap(w_expr) - source = py.code.Source(expr) - frame = space.getexecutioncontext().framestack.top() - w_locals = frame.getdictscope() - w_locals = space.call_method(w_locals, 'copy') - for key, w_value in kwds_w.items(): - space.setitem(w_locals, space.wrap(key), w_value) - try: - space.call_method(space.w_builtin, 'eval', - space.wrap(str(source)), - frame.w_globals, - w_locals) - except OperationError, e: - if e.match(space, w_ExpectedException): - return space.sys.exc_info() - raise - else: - try: - space.call_args(w_expr, __args__) - except OperationError, e: - if e.match(space, w_ExpectedException): - return 
space.sys.exc_info() - raise - raise OperationError(space.w_AssertionError, - space.wrap("DID NOT RAISE")) - -app_raises = interp2app_temp(pypyraises) - -def pypyskip(space, w_message): - """skip a test at app-level. """ - msg = space.unwrap(w_message) - py.test.skip(msg) - -app_skip = interp2app_temp(pypyskip) - -def raises_w(space, w_ExpectedException, *args, **kwds): - try: - excinfo = py.test.raises(OperationError, *args, **kwds) - type, value, tb = excinfo._excinfo - if not value.match(space, w_ExpectedException): - raise type, value, tb - return excinfo - except py.test.Item.ExceptionFailure, e: - e.tbindex = getattr(e, 'tbindex', -1) - 1 - raise - -def eq_w(space, w_obj1, w_obj2): - """ return interp-level boolean of eq(w_obj1, w_obj2). """ - return space.is_true(space.eq(w_obj1, w_obj2)) diff --git a/pypy/tool/stdprofile.py b/pypy/tool/stdprofile.py deleted file mode 100644 index 9569ae5da8..0000000000 --- a/pypy/tool/stdprofile.py +++ /dev/null @@ -1,23 +0,0 @@ -#! /usr/bin/env python - -import autopath -import hotshot, os -from pypy.objspace.std import StdObjSpace - -try: - os.unlink('profile.log') -except: - pass - -p = hotshot.Profile('profile.log') -p.run('StdObjSpace()') -p.close() - -print 'loading...' 
- -import hotshot.stats -p = hotshot.stats.load('profile.log') -p.strip_dirs() -p.sort_stats('time', 'calls') -p.print_stats(20) - diff --git a/pypy/tool/tb_server/__init__.py b/pypy/tool/tb_server/__init__.py deleted file mode 100644 index 7931373ec1..0000000000 --- a/pypy/tool/tb_server/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Lazy import - -def start(): - global start, stop, publish_exc, wait_until_interrupt - from server import start, stop, publish_exc, wait_until_interrupt - return start() - -def stop(): - pass - -def wait_until_interrupt(): - pass - -def publish_exc(exc): - pass diff --git a/pypy/tool/tb_server/render.py b/pypy/tool/tb_server/render.py deleted file mode 100644 index d583b919e2..0000000000 --- a/pypy/tool/tb_server/render.py +++ /dev/null @@ -1,127 +0,0 @@ -from pypy.tool.tb_server.server import TBRequestHandler -import py -html = py.xml.html - -import traceback -import cgi -import urllib - -views = TBRequestHandler.views - -class URL(object): - attrs='scm','netloc','path','params','query','fragment' - attrindex = dict(zip(attrs, range(len(attrs)))) - # XXX authentication part is not parsed - - def __init__(self, string='', **kw): - from urlparse import urlparse - for name,value in zip(self.attrs, urlparse(string, 'http')): - setattr(self, name, value) - self.__dict__.update(kw) - self.query = cgi.parse_qs(self.query) - - def link_with_options(self, kw): - nq = {} - for k in self.query: - nq[k] = self.query[k][0] - nq.update(kw) - query = urllib.urlencode(nq) - from urlparse import urlunparse - return urlunparse(('', self.netloc, self.path, - self.params, query, self.fragment)) - -class Renderer: - def render(self, path): - url = URL(path) - args = url.path.split('/')[2:] - try: - inner = self.render_self(url, args) - except: - import sys, traceback - lines = traceback.format_exception(*sys.exc_info()) - inner = html.pre( - py.xml.escape(''.join( - ['Internal Rendering Error, traceback follows\n'] + lines))) - - tag = html.html( - 
html.head(), - html.body( - inner - ) - ) - return tag.unicode(indent=2) - - -class TracebackView(Renderer): - def __init__(self, excinfo): - self.name = 'traceback%d' % len(views) - views[self.name] = self - if not isinstance(excinfo, py.code.ExceptionInfo): - excinfo = py.code.ExceptionInfo(excinfo) - self.excinfo = excinfo - - def render_self(self, url, args): - lines = html.div() - opts = {} - for k in url.query: - ent, opt = k.split(':') - val = int(url.query[k][0]) - opts.setdefault(ent, {})[opt] = val - - i = 0 - for tbentry in self.excinfo.traceback: - lines.append(self.render_tb( - url, tbentry, i, - **opts.get('entry' + str(i), {}))) - i += 1 - - lines.append(html.pre(py.xml.escape(self.excinfo.exconly()))) - return lines - - def render_tb(self, url, tbentry, i, showlocals=0): - lines = html.pre() - filename = tbentry.frame.code.path - lineno = tbentry.lineno + 1 - name = tbentry.frame.code.name - link = '/file%s?line=%d#%d' %(filename, lineno, lineno) - lines.append(' File "%s", line %d, in %s\n'%( - html.a(filename, href=link), lineno, name)) - lines.append(html.a('locals', href=url.link_with_options( - {'entry%d:showlocals' % i : 1-showlocals}))) - lines.append(' ' + - filename.readlines()[lineno-1].lstrip()) - if showlocals: - for k, v in tbentry.frame.f_locals.items(): - if k[0] == '_': - continue - lines.append(py.xml.escape('%s=%s\n'%(k, repr(v)[:1000]))) - return lines - - -def ln(lineno): - return html.a(name=str(lineno)) - -class FileSystemView(Renderer): - def render_self(self, url, args): - fname = '/' + '/'.join(args) - lines = html.table() - i = 1 - hilite = int(url.query.get('line', [-1])[0]) - for line in open(fname): - if i == hilite: - kws = {'style': 'font-weight: bold;'} - else: - kws = {} - row = html.tr( - html.td(html.a("%03d" % i, name=str(i))), - html.td( - html.pre(py.xml.escape(line)[:-1], - **kws), - ), - ) - lines.append(row) - i += 1 - return lines - -views['file'] = FileSystemView() - diff --git 
a/pypy/tool/tb_server/server.py b/pypy/tool/tb_server/server.py deleted file mode 100644 index bcd798d38f..0000000000 --- a/pypy/tool/tb_server/server.py +++ /dev/null @@ -1,96 +0,0 @@ -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer -import threading -import sys - -server_thread = None -server_port = None - -class TBRequestHandler(BaseHTTPRequestHandler): - views = {} - - def do_GET(self): - if self.path == '/quit': - global server_thread - server_thread = None - raise SystemExit - i = self.path.find('/', 1) - parts = self.path[1:].split('/', 1) - if not parts: - tp_name = 'traceback' - else: - tb_name = parts[0] - if not self.views.has_key(tb_name): - self.send_response(404) - self.send_header("Content-Type", "text/plain") - self.end_headers() - self.wfile.write('traceback named %r not found' % tb_name) - else: - tbview = self.views[tb_name] - s = tbview.render(self.path) - self.send_response(200) - self.send_header("Content-Type", "text/html; charset=utf-8") - self.end_headers() - self.wfile.write(unicode(s).encode('utf8')) - - def log_message(self, format, *args): - pass - -class TBServer(HTTPServer): - def handle_error(self, request, client_address): - exc = sys.exc_info()[1] - if isinstance(exc, (SystemExit, KeyboardInterrupt)): - raise - else: - HTTPServer.handle_error(self, request, client_address) - -def serve(): - import socket - port = 8080 - while 1: - try: - server = TBServer(('localhost', port), TBRequestHandler) - except socket.error: - port += 1 - continue - else: - break - global server_port - server_port = port - print "serving on", port - server.serve_forever() - -def start(): - global server_thread - server_thread = threading.Thread(target=serve) - server_thread.start() - return server_thread - -def stop(): - if server_thread is None: - return - import urllib2 - try: - urllib2.urlopen('http://localhost:%s/quit'%(server_port,)) - except urllib2.HTTPError: - pass - -def wait_until_interrupt(): - if server_thread is None: - return - 
print "waiting" - import signal - try: - signal.pause() - except KeyboardInterrupt: - stop() - -def publish_exc(exc): - if server_thread is None: - return - from pypy.tool.tb_server.render import TracebackView - x = TracebackView(exc) - print "traceback is at http://localhost:%d/%s" % (server_port, x.name) - -if __name__ == "__main__": - t = start() - wait_until_interrupt() diff --git a/pypy/tool/test/__init__.py b/pypy/tool/test/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/tool/test/__init__.py +++ /dev/null diff --git a/pypy/tool/test/autopath.py b/pypy/tool/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/tool/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/tool/test/test_cache.py b/pypy/tool/test/test_cache.py deleted file mode 100644 index 6f80ea2ee8..0000000000 --- a/pypy/tool/test/test_cache.py +++ /dev/null @@ -1,20 +0,0 @@ -import autopath -from pypy.tool.cache import Cache - -class TestCache: - def test_getorbuild(self): - cache = Cache() - assert cache.getorbuild(1, lambda k,s: 42, None) == 42 - assert cache.getorbuild(1, lambda k,s: self.fail(), None) == 42 - # XXX cannot test that any longer: - # XXX hash(cache) now freezes the cache "just-in-time". 
- # XXX --disabled-- self.assertRaises(TypeError, hash, cache) - cache.clear() - assert cache.getorbuild(1, lambda k,s: 44, None) == 44 - assert cache.getorbuild(1, lambda k,s: self.fail(), None) == 44 - cache.freeze() - hash(cache) - assert cache.getorbuild(1, lambda k,s: self.fail(), None) == 44 - raises(TypeError, cache.clear) - raises(AssertionError, cache.getorbuild, - 2, lambda k,s: self.fail(), 4) diff --git a/pypy/tool/test/test_conftest1.py b/pypy/tool/test/test_conftest1.py deleted file mode 100644 index 7b6c0f43a9..0000000000 --- a/pypy/tool/test/test_conftest1.py +++ /dev/null @@ -1,15 +0,0 @@ - -def test_something(space): - assert space.w_None is space.w_None - -def app_test_something(): - assert 42 == 42 - -class AppTestSomething: - def test_method(self): - assert 23 == 23 - -class TestSomething: - def test_method(self): - assert self.space - diff --git a/pypy/tool/test/test_pytestsupport.py b/pypy/tool/test/test_pytestsupport.py deleted file mode 100644 index 982bd050a9..0000000000 --- a/pypy/tool/test/test_pytestsupport.py +++ /dev/null @@ -1,49 +0,0 @@ -import autopath -from py.__impl__.magic import exprinfo -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import app2interp_temp -from pypy.interpreter.argument import Arguments -from pypy.interpreter.pycode import PyCode -from pypy.interpreter.pyframe import PyFrame -from pypy.tool.pytestsupport import AppFrame, build_pytest_assertion - - -def somefunc(x): - print x - -def test_AppFrame(space): - import sys - co = PyCode()._from_code(somefunc.func_code) - pyframe = PyFrame(space, co, space.newdict([]), None) - runner = AppFrame(pyframe) - exprinfo.run("f = lambda x: x+1", runner) - msg = exprinfo.interpret("assert isinstance(f(2), float)", runner) - assert msg.startswith("assert isinstance(3, float)\n" - " + where 3 = ") - - -def test_myexception(space): - def app_test_func(): - x = 6*7 - assert x == 43 - t = app2interp_temp(app_test_func) - f = t.get_function(space) 
- space.setitem(space.w_builtins, space.wrap('AssertionError'), - build_pytest_assertion(space)) - try: - f.call_args(Arguments([])) - except OperationError, e: - assert e.match(space, space.w_AssertionError) - assert space.unwrap(space.str(e.w_value)) == 'assert 42 == 43' - else: - assert False, "got no exception!" - -def app_test_exception(): - try: - raise AssertionError("42") - except AssertionError: - pass - else: - raise AssertionError, "app level AssertionError mixup!" - - diff --git a/pypy/tool/test/test_utestconvert.py b/pypy/tool/test/test_utestconvert.py deleted file mode 100644 index f61c22ab70..0000000000 --- a/pypy/tool/test/test_utestconvert.py +++ /dev/null @@ -1,414 +0,0 @@ -import autopath -from pypy.tool.utestconvert import rewrite_utest -import unittest - -class Testit(unittest.TestCase): - def test(self): - assert rewrite_utest("badger badger badger") == ( - "badger badger badger") - - assert rewrite_utest( - "self.assertRaises(excClass, callableObj, *args, **kwargs)" - ) == ( - "raises(excClass, callableObj, *args, **kwargs)" - ) - - assert rewrite_utest( - """ - self.failUnlessRaises(TypeError, func, 42, **{'arg1': 23}) - """ - ) == ( - """ - raises(TypeError, func, 42, **{'arg1': 23}) - """ - ) - assert rewrite_utest( - """ - self.assertRaises(TypeError, - func, - mushroom) - """ - ) == ( - """ - raises(TypeError, - func, - mushroom) - """ - ) - assert rewrite_utest("self.fail()") == "raise AssertionError" - assert rewrite_utest("self.fail('mushroom, mushroom')") == ( - "raise AssertionError, 'mushroom, mushroom'") - assert rewrite_utest("self.assert_(x)") == "assert x" - assert rewrite_utest("self.failUnless(func(x)) # XXX") == ( - "assert func(x) # XXX") - - assert rewrite_utest( - """ - self.assert_(1 + f(y) - + z) # multiline, keep parentheses - """ - ) == ( - """ - assert (1 + f(y) - + z) # multiline, keep parentheses - """ - ) - - assert rewrite_utest("self.assert_(0, 'badger badger')") == ( - "assert 0, 'badger badger'") - - assert 
rewrite_utest("self.assert_(0, '''badger badger''')") == ( - "assert 0, '''badger badger'''") - - assert rewrite_utest( - r""" - self.assert_(0, - 'Meet the badger.\n') - """ - ) == ( - r""" - assert 0, ( - 'Meet the badger.\n') - """ - ) - - assert rewrite_utest( - r""" - self.failIf(0 + 0 - + len('badger\n') - + 0, '''badger badger badger badger - mushroom mushroom - Snake! Ooh a snake! - ''') # multiline, must move the parens - """ - ) == ( - r""" - assert not (0 + 0 - + len('badger\n') - + 0), '''badger badger badger badger - mushroom mushroom - Snake! Ooh a snake! - ''' # multiline, must move the parens - """ - ) - - assert rewrite_utest("self.assertEquals(0, 0)") == ( - "assert 0 == 0") - - assert rewrite_utest( - r""" - self.assertEquals(0, - 'Run away from the snake.\n') - """ - ) == ( - r""" - assert 0 == ( - 'Run away from the snake.\n') - """ - ) - - assert rewrite_utest( - """ - self.assertEquals(badger + 0 - + mushroom - + snake, 0) - """ - ) == ( - """ - assert (badger + 0 - + mushroom - + snake) == 0 - """ - ) - - assert rewrite_utest( - """ - self.assertNotEquals(badger + 0 - + mushroom - + snake, - mushroom - - badger) - """ - ) == ( - """ - assert (badger + 0 - + mushroom - + snake) != ( - mushroom - - badger) - """ - ) - - assert rewrite_utest( - """ - self.assertEquals(badger(), - mushroom() - + snake(mushroom) - - badger()) - """ - ) == ( - """ - assert badger() == ( - mushroom() - + snake(mushroom) - - badger()) - """ - ) - assert rewrite_utest("self.failIfEqual(0, 0)") == ( - "assert not 0 == 0") - - assert rewrite_utest("self.failUnlessEqual(0, 0)") == ( - "assert not 0 != 0") - - assert rewrite_utest( - """ - self.failUnlessEqual(mushroom() - + mushroom() - + mushroom(), '''badger badger badger - badger badger badger badger - badger badger badger badger - ''') # multiline, must move the parens - """ - ) == ( - """ - assert not (mushroom() - + mushroom() - + mushroom()) != '''badger badger badger - badger badger badger badger - badger badger 
badger badger - ''' # multiline, must move the parens - """ - ) - - - assert rewrite_utest( - """ - self.assertEquals('''snake snake snake - snake snake snake''', mushroom) - """ - ) == ( - """ - assert '''snake snake snake - snake snake snake''' == mushroom - """ - ) - - assert rewrite_utest( - """ - self.assertEquals(badger(), - snake(), 'BAD BADGER') - """ - ) == ( - """ - assert badger() == ( - snake()), 'BAD BADGER' - """ - ) - - assert rewrite_utest( - """ - self.assertNotEquals(badger(), - snake()+ - snake(), 'POISONOUS MUSHROOM!\ - Ai! I ate a POISONOUS MUSHROOM!!') - """ - ) == ( - """ - assert badger() != ( - snake()+ - snake()), 'POISONOUS MUSHROOM!\ - Ai! I ate a POISONOUS MUSHROOM!!' - """ - ) - - assert rewrite_utest( - """ - self.assertEquals(badger(), - snake(), '''BAD BADGER - BAD BADGER - BAD BADGER''' - ) - """ - ) == ( - """ - assert badger() == ( - snake()), ( '''BAD BADGER - BAD BADGER - BAD BADGER''' - ) - """ - ) - - assert rewrite_utest( - """ - self.assertEquals('''BAD BADGER - BAD BADGER - BAD BADGER''', '''BAD BADGER - BAD BADGER - BAD BADGER''') - """ - ) == ( - """ - assert '''BAD BADGER - BAD BADGER - BAD BADGER''' == '''BAD BADGER - BAD BADGER - BAD BADGER''' - """ - ) - - assert rewrite_utest( - """ - self.assertEquals('''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''', - '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''', - ''' FAILURE - FAILURE - FAILURE''') - """ - ) == ( - """ - assert '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''' == ( - '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM'''), ( - ''' FAILURE - FAILURE - FAILURE''') - """ - ) - - assert rewrite_utest( - """ - self.assertAlmostEquals(first, second, 5, 'A Snake!') - """ - ) == ( - """ - assert round(first - second, 5) == 0, 'A Snake!' 
- """ - ) - - assert rewrite_utest( - """ - self.assertAlmostEquals(first, second, 120) - """ - ) == ( - """ - assert round(first - second, 120) == 0 - """ - ) - - assert rewrite_utest( - """ - self.assertAlmostEquals(first, second) - """ - ) == ( - """ - assert round(first - second, 7) == 0 - """ - ) - - assert rewrite_utest( - """ - self.assertAlmostEqual(first, second, 5, '''A Snake! - Ohh A Snake! A Snake!! - ''') - """ - ) == ( - """ - assert round(first - second, 5) == 0, '''A Snake! - Ohh A Snake! A Snake!! - ''' - """ - ) - - assert rewrite_utest( - """ - self.assertNotAlmostEqual(first, second, 5, 'A Snake!') - """ - ) == ( - """ - assert round(first - second, 5) != 0, 'A Snake!' - """ - ) - - assert rewrite_utest( - """ - self.failIfAlmostEqual(first, second, 5, 'A Snake!') - """ - ) == ( - """ - assert not round(first - second, 5) == 0, 'A Snake!' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.failIfAlmostEqual(first, second, 5, 6, 7, 'Too Many Args') - """ - ), - """ - self.failIfAlmostEqual(first, second, 5, 6, 7, 'Too Many Args') - """ - ) - - assert rewrite_utest( - """ - self.failUnlessAlmostEquals(first, second, 5, 'A Snake!') - """ - ) == ( - """ - assert not round(first - second, 5) != 0, 'A Snake!' - """ - ) - - assert rewrite_utest( - """ - self.assertAlmostEquals(now do something reasonable ..() - oops, I am inside a comment as a ''' string, and the fname was - mentioned in passing, leaving us with something that isn't an - expression ... will this blow up? - """ - ) == ( - """ - self.assertAlmostEquals(now do something reasonable ..() - oops, I am inside a comment as a ''' string, and the fname was - mentioned in passing, leaving us with something that isn't an - expression ... will this blow up? 
- """ - ) - - assert rewrite_utest( - """ - self.failUnless('__builtin__' in modules, "An entry for __builtin__ " - "is not in sys.modules.") - """ - ) == ( - """ - assert '__builtin__' in modules, ( "An entry for __builtin__ " - "is not in sys.modules.") - """ - ) - - # two unittests on the same line separated by a semi-colon is - # only half-converted. Just so you know. - assert rewrite_utest( - """ - self.assertEquals(0, 0); self.assertEquals(1, 1) #not 2 per line! - """ - ) == ( - """ - assert 0 == 0; self.assertEquals(1, 1) #not 2 per line! - """ - ) - - -if __name__ == '__main__': - unittest.main() - - diff --git a/pypy/tool/test/test_utestconvert2.py b/pypy/tool/test/test_utestconvert2.py deleted file mode 100644 index cbf2c613fd..0000000000 --- a/pypy/tool/test/test_utestconvert2.py +++ /dev/null @@ -1,424 +0,0 @@ -import autopath -from pypy.tool.utestconvert import rewrite_utest -import unittest - -class Testit(unittest.TestCase): - def test(self): - self.assertEquals(rewrite_utest("badger badger badger"), - "badger badger badger") - - self.assertEquals(rewrite_utest( - "self.assertRaises(excClass, callableObj, *args, **kwargs)" - ), - "raises(excClass, callableObj, *args, **kwargs)" - ) - - self.assertEquals(rewrite_utest( - """ - self.failUnlessRaises(TypeError, func, 42, **{'arg1': 23}) - """ - ), - """ - raises(TypeError, func, 42, **{'arg1': 23}) - """ - ) - self.assertEquals(rewrite_utest( - """ - self.assertRaises(TypeError, - func, - mushroom) - """ - ), - """ - raises(TypeError, - func, - mushroom) - """ - ) - self.assertEquals(rewrite_utest("self.fail()"), "raise AssertionError") - self.assertEquals(rewrite_utest("self.fail('mushroom, mushroom')"), - "raise AssertionError, 'mushroom, mushroom'") - self.assertEquals(rewrite_utest("self.assert_(x)"), "assert x") - self.assertEquals(rewrite_utest("self.failUnless(func(x)) # XXX"), - "assert func(x) # XXX") - - self.assertEquals(rewrite_utest( - """ - self.assert_(1 + f(y) - + z) # multiline, keep 
parentheses - """ - ), - """ - assert (1 + f(y) - + z) # multiline, keep parentheses - """ - ) - - self.assertEquals(rewrite_utest("self.assert_(0, 'badger badger')"), - "assert 0, 'badger badger'") - - self.assertEquals(rewrite_utest("self.assert_(0, '''badger badger''')"), - "assert 0, '''badger badger'''") - - self.assertEquals(rewrite_utest( - r""" - self.assert_(0, - 'Meet the badger.\n') - """ - ), - r""" - assert 0, ( - 'Meet the badger.\n') - """ - ) - - self.assertEquals(rewrite_utest( - r""" - self.failIf(0 + 0 - + len('badger\n') - + 0, '''badger badger badger badger - mushroom mushroom - Snake! Ooh a snake! - ''') # multiline, must move the parens - """ - ), - r""" - assert not (0 + 0 - + len('badger\n') - + 0), '''badger badger badger badger - mushroom mushroom - Snake! Ooh a snake! - ''' # multiline, must move the parens - """ - ) - - self.assertEquals(rewrite_utest("self.assertEquals(0, 0)"), - "assert 0 == 0") - - self.assertEquals(rewrite_utest( - r""" - self.assertEquals(0, - 'Run away from the snake.\n') - """ - ), - r""" - assert 0 == ( - 'Run away from the snake.\n') - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertEquals(badger + 0 - + mushroom - + snake, 0) - """ - ), - """ - assert (badger + 0 - + mushroom - + snake) == 0 - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertNotEquals(badger + 0 - + mushroom - + snake, - mushroom - - badger) - """ - ), - """ - assert (badger + 0 - + mushroom - + snake) != ( - mushroom - - badger) - """ - ) - - self.assertEqual(rewrite_utest( - """ - self.assertEquals(badger(), - mushroom() - + snake(mushroom) - - badger()) - """ - ), - """ - assert badger() == ( - mushroom() - + snake(mushroom) - - badger()) - """ - ) - self.assertEquals(rewrite_utest("self.failIfEqual(0, 0)"), - "assert not 0 == 0") - - self.assertEquals(rewrite_utest("self.failUnlessEqual(0, 0)"), - "assert not 0 != 0") - - self.assertEquals(rewrite_utest( - """ - self.failUnlessEqual(mushroom() - + mushroom() 
- + mushroom(), '''badger badger badger - badger badger badger badger - badger badger badger badger - ''') # multiline, must move the parens - """ - ), - """ - assert not (mushroom() - + mushroom() - + mushroom()) != '''badger badger badger - badger badger badger badger - badger badger badger badger - ''' # multiline, must move the parens - """ - ) - - - self.assertEquals(rewrite_utest( - """ - self.assertEquals('''snake snake snake - snake snake snake''', mushroom) - """ - ), - """ - assert '''snake snake snake - snake snake snake''' == mushroom - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertEquals(badger(), - snake(), 'BAD BADGER') - """ - ), - """ - assert badger() == ( - snake()), 'BAD BADGER' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertNotEquals(badger(), - snake()+ - snake(), 'POISONOUS MUSHROOM!\ - Ai! I ate a POISONOUS MUSHROOM!!') - """ - ), - """ - assert badger() != ( - snake()+ - snake()), 'POISONOUS MUSHROOM!\ - Ai! I ate a POISONOUS MUSHROOM!!' 
- """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertEquals(badger(), - snake(), '''BAD BADGER - BAD BADGER - BAD BADGER''' - ) - """ - ), - """ - assert badger() == ( - snake()), ( '''BAD BADGER - BAD BADGER - BAD BADGER''' - ) - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertEquals('''BAD BADGER - BAD BADGER - BAD BADGER''', '''BAD BADGER - BAD BADGER - BAD BADGER''') - """ - ), - """ - assert '''BAD BADGER - BAD BADGER - BAD BADGER''' == '''BAD BADGER - BAD BADGER - BAD BADGER''' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertEquals('''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''', - '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''', - ''' FAILURE - FAILURE - FAILURE''') - """ - ), - """ - assert '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM''' == ( - '''GOOD MUSHROOM - GOOD MUSHROOM - GOOD MUSHROOM'''), ( - ''' FAILURE - FAILURE - FAILURE''') - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertAlmostEquals(first, second, 5, 'A Snake!') - """ - ), - """ - assert round(first - second, 5) == 0, 'A Snake!' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertAlmostEquals(first, second, 120) - """ - ), - """ - assert round(first - second, 120) == 0 - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertAlmostEquals(first, second) - """ - ), - """ - assert round(first - second, 7) == 0 - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertAlmostEqual(first, second, 5, '''A Snake! - Ohh A Snake! A Snake!! - ''') - """ - ), - """ - assert round(first - second, 5) == 0, '''A Snake! - Ohh A Snake! A Snake!! - ''' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertNotAlmostEqual(first, second, 5, 'A Snake!') - """ - ), - """ - assert round(first - second, 5) != 0, 'A Snake!' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.failIfAlmostEqual(first, second, 5, 'A Snake!') - """ - ), - """ - assert not round(first - second, 5) == 0, 'A Snake!' 
- """ - ) - - self.assertEquals(rewrite_utest( - """ - self.failIfAlmostEqual(first, second, 5, 6, 7, 'Too Many Args') - """ - ), - """ - self.failIfAlmostEqual(first, second, 5, 6, 7, 'Too Many Args') - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.failUnlessAlmostEquals(first, second, 5, 'A Snake!') - """ - ), - """ - assert not round(first - second, 5) != 0, 'A Snake!' - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.assertAlmostEquals(now do something reasonable ..() - oops, I am inside a comment as a ''' string, and the fname was - mentioned in passing, leaving us with something that isn't an - expression ... will this blow up? - """ - ), - """ - self.assertAlmostEquals(now do something reasonable ..() - oops, I am inside a comment as a ''' string, and the fname was - mentioned in passing, leaving us with something that isn't an - expression ... will this blow up? - """ - ) - - self.assertEquals(rewrite_utest( - """ - happily inside a comment I write self.assertEquals(0, badger) - now will this one get rewritten? - """ - ), - """ - happily inside a comment I write self.assertEquals(0, badger) - now will this one get rewritten? - """ - ) - - self.assertEquals(rewrite_utest( - """ - self.failUnless('__builtin__' in modules, "An entry for __builtin__ " - "is not in sys.modules.") - """ - ), - """ - assert '__builtin__' in modules, ( "An entry for __builtin__ " - "is not in sys.modules.") - """ - ) - - # two unittests on the same line separated by a semi-colon is - # only half-converted. Just so you know. - self.assertEquals(rewrite_utest( - """ - self.assertEquals(0, 0); self.assertEquals(1, 1) #not 2 per line! - """ - ), - """ - assert 0 == 0; self.assertEquals(1, 1) #not 2 per line! 
- """ - ) - - -if __name__ == '__main__': - unittest.main() diff --git a/pypy/tool/testpm.py b/pypy/tool/testpm.py deleted file mode 100644 index 9dc3e51ddc..0000000000 --- a/pypy/tool/testpm.py +++ /dev/null @@ -1,79 +0,0 @@ -# this file implements a little interactive loop that can be -# optionally entered at the end of a test run to allow inspection (and -# pdb-ing) of failures and/or errors. - -import autopath -from pypy.tool import ppdb -import cmd, traceback - -class TestPM(cmd.Cmd): - def __init__(self, efs): - cmd.Cmd.__init__(self) - self.efs = efs - self.prompt = 'tbi> ' - def emptyline(self): - return 1 - - def do_EOF(self, line): - return 1 - do_q = do_EOF - - def print_tb(self, ef): - from pypy.interpreter.baseobjspace import OperationError - err = ef[1] - print ''.join(traceback.format_exception(*err)) - if isinstance(err[1], OperationError): - print 'and at app-level:' - print - err[1].print_application_traceback(ef[0].space) - - def do_l(self, line): - i = 0 - for t, e in self.efs: - print i, t.__class__.__module__, t.__class__.__name__, t.methodName - i += 1 - - def do_tb(self, arg): - args = arg.split() - if len(args) == 0: - for x in self.efs: - t = x[0] - print t.__class__.__module__, t.__class__.__name__, t.methodName - print - self.print_tb(x) - print - elif len(args) == 1: - try: - tbi = int(args[0]) - except ValueError: - print "error2" - else: - if 0 <= tbi < len(self.efs): - self.print_tb(self.efs[tbi]) - else: - print "error3" - else: - print "error" - - def do_d(self, arg): - args = arg.split() - if len(args) == 1: - try: - efi = int(args[0]) - except ValueError: - print "error2" - else: - if 0 <= efi < len(self.efs): - s, (t, v, tb) = self.efs[efi] - ppdb.post_mortem(s.space, tb, v) - else: - print "error3" - else: - print "error" - - -if __name__ == '__main__': - # just for testing - c = TestPM([]) - c.cmdloop() - diff --git a/pypy/tool/traceinteractive.py b/pypy/tool/traceinteractive.py deleted file mode 100644 index 
bbe6394092..0000000000 --- a/pypy/tool/traceinteractive.py +++ /dev/null @@ -1,260 +0,0 @@ -# Standard imports -import re -import sys -import code -import keyword - -assert sys.version_info >= (2,3), "sorry, can only run with python2.3 and greater" - -try: - import readline - have_readline = True - -except: - have_readline = False - - -# PyPy imports -import autopath - -from pypy.tool import pydis -from pypy.tool.traceop import ResultPrinter - -from pypy.interpreter import executioncontext, pyframe, baseobjspace -from pypy.interpreter.baseobjspace import ObjSpace - -from pypy.objspace import trace - -class Completer: - """ Stolen mostly from CPython's rlcompleter.py """ - def __init__(self, space): - self.space = space - - def complete(self, text, state): - if state == 0: - if "." in text: - self.matches = self.attr_matches(text) - else: - self.matches = self.global_matches(text) - try: - return self.matches[state] - - except IndexError: - return None - - def global_matches(self, text): - - w_res = self.space.call_method(self.space.w_globals, "keys") - namespace_keys = self.space.unwrap(w_res) - - w_res = self.space.call_method(self.space.w_builtins, "keys") - builtin_keys = self.space.unwrap(w_res) - - matches = [] - n = len(text) - - for l in [namespace_keys, builtin_keys, keyword.kwlist]: - for word in l: - if word[:n] == text and word != "__builtins__": - matches.append(word) - - return matches - - def attr_matches(self, text): - m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) - if not m: - return - - expr, attr = m.group(1, 3) - s = self.space - w_obj = s.eval(expr, s.w_globals, s.w_globals) - words = self.get_words(w_obj) - - w_clz = s.getattr(w_obj, s.wrap("__class__")) - words += self.get_class_members(w_clz) - - matches = [] - n = len(attr) - for word in words: - if word[:n] == attr and word != "__builtins__": - matches.append("%s.%s" % (expr, word)) - - return matches - - def get_words(self, w_clz): - s = self.space - w_dir_func = s.getitem(s.w_builtins, 
s.wrap("dir")) - w_res = s.call_function(w_dir_func, w_clz) - return s.unwrap(w_res) - - def get_class_members(self, w_clz): - s = self.space - words = self.get_words(w_clz) - try: - w_bases = s.getattr(w_clz, s.wrap("__bases__")) - bases_w = s.unpacktuple(w_bases) - - except OperationError: - return words - - for w_clz in bases_w: - words += self.get_class_members(w_clz) - - return words - -class TraceConsole(code.InteractiveConsole): - def __init__(self, space): - code.InteractiveConsole.__init__(self) - s = self.space = space - s.setitem(s.w_globals, s.wrap("__pytrace__"), s.wrap(0)) - # Trace is binary (on or off), but we have different print levels - # for tracelevel > 0 - self.tracelevel = 0 - self.resprinter = ResultPrinter() - - def interact(self, banner=None): - if banner is None: - banner = self.get_banner() - code.InteractiveConsole.interact(self, banner) - - def get_banner(self): - banner = "PyPy in %s on top of CPython %s\n%s" % ( - space.__class__.__name__, sys.version.split()[0], - " [Use __pytrace__ to set trace level (zero is default and does no tracing)]" ) - return banner - - def raw_input(self, prompt=""): - # add a character to the PyPy prompt so that you know where you - # are when you debug it with "python -i py.py" - try: - return code.InteractiveConsole.raw_input(self, prompt[0] + prompt) - - except KeyboardInterrupt: - s = self.space - # fires into an interpreter-level console - print - banner = ("Python %s on %s\n" % (sys.version, sys.platform) + - "*** Entering interpreter-level console ***") - local = self.__dict__.copy() - for w_name in s.unpackiterable(s.w_globals): - local['w_' + s.unwrap(w_name)] = ( - s.getitem(s.w_globals, w_name)) - code.interact(banner=banner, local=local) - # copy back 'w_' names - for name in local: - if name.startswith('w_'): - s.setitem(s.w_globals, - s.wrap(name[2:]), - local[name]) - print '*** Leaving interpreter-level console ***' - raise - - def set_tracelevel(self, tracelevel): - # Disable tracing 
altogether? - if self.tracelevel > 0 and tracelevel == 0: - self.space.reset_trace() - print self.get_banner() - - if self.tracelevel == 0 and tracelevel > 0: - trace.create_trace_space(self.space) - print self.get_banner() - - self.tracelevel = tracelevel - - # XXX Do something better than this - I'm not really sure what is useful - # and what is (rxe) - self.resprinter.operations_level = tracelevel - - def runcode(self, code): - # 'code' is a CPython code object - from pypy.interpreter.pycode import PyCode - pycode = PyCode()._from_code(code) - - s = self.space - - try: - if self.tracelevel: - s.settrace() - - pycode.exec_code(s, s.w_globals, s.w_globals) - - res = None - if self.tracelevel: - res = s.getresult() - - # Did we modify __pytrace__ - tracelevel = s.unwrap(s.getitem(s.w_globals, - s.wrap("__pytrace__"))) - - if tracelevel != self.tracelevel: - self.set_tracelevel(tracelevel) - - if res is not None and self.tracelevel: - s.settrace() - self.resprinter.print_result(s, res) - - except baseobjspace.OperationError, operationerr: - if self.tracelevel: - res = s.getresult() - s.settrace() - self.resprinter.print_result(s, res) - - # XXX insert exception info into the application-level sys.last_xxx - print - operationerr.print_application_traceback(self.space) - - else: - print - - def runsource(self, source, ignored_filename = "<input>", symbol = "single"): - hacked_filename = '<inline> ' + source[:80] + "..." 
- hacked_filename = hacked_filename.replace("\n", r"\n") - try: - code = self.compile(source, hacked_filename, symbol) - - except (OverflowError, SyntaxError, ValueError): - self.showsyntaxerror(self.filename) - return False - - if code is None: - return True - - self.runcode(code) - return False - -def trace_interactive(space, banner = None): - s = space - - # Create an execution context, and set the globals - ec = s.getexecutioncontext() - s.w_globals = ec.make_standard_w_globals() - - s.setitem(s.w_globals, s.wrap("__name__"), s.wrap("__main__")) - console = TraceConsole(s) - - if have_readline: - # Keep here to save windoze tears - readline.set_completer(Completer(s).complete) - readline.parse_and_bind("tab: complete") - readline.set_history_length(25000) - - try: - readline.read_history_file() - - except IOError: - pass # guess it doesn't exit - - import atexit - atexit.register(readline.write_history_file) - - console.interact(banner) - -if __name__ == '__main__': - - from pypy.tool import option - args = option.process_options(option.get_standard_options(), - option.Options) - - # Create objspace... - space = option.objspace() - trace_interactive(space) diff --git a/pypy/tool/traceop.py b/pypy/tool/traceop.py deleted file mode 100644 index 96dd1e7971..0000000000 --- a/pypy/tool/traceop.py +++ /dev/null @@ -1,258 +0,0 @@ - -import autopath - -from pypy.tool import pydis -from pypy.objspace import trace - -def getdisresult(obj, _cache={}): - """ return dissassemble result for the given obj which can be a - pyframe or function or a code object. 
- """ - obj = getattr(obj, 'func_code', obj) - obj = getattr(obj, 'code', obj) - try: - return _cache[obj] - except KeyError: - disresult = _cache[obj] = pydis.pydis(obj) - return disresult - -class Stack(list): - push = list.append - - def pop(self): - return super(Stack, self).pop(-1) - - def top(self): - try: - return self[-1] - except IndexError: - return None - -class ResultPrinter: - - def __init__(self, - skip_all_below_op = True, - operations_level = 2, - indentor = ' ', - skip_bytecodes = ["PRINT_EXPR", "PRINT_ITEM", "PRINT_NEWLINE"], - ): - - # Configurable stuff - self.indentor = indentor - self.skip_bytecodes = skip_bytecodes - self.operations_level = operations_level - self.skip_all_below_op = skip_all_below_op - - self.reset() - - def reset(self): - # State stuff - self.ops = Stack() - self.frames = Stack() - self.frame_count = 0 - self.skip_frame_count = None - - def print_line(self, line, additional_indent = 0): - if self.skip_frame_count is not None: - return - - if self.frame_count: - indent = self.frame_count + additional_indent - 1 - assert (indent >= 0) - line = (self.indentor * indent) + "|-" + line - - print line - - def print_line_operations(self, line, additional_indent = 0): - # Don't allow operations to be exposed if operations level is up - # but do allow operations to be printed - if len(self.ops) > self.operations_level: - return - - self.print_line(line, additional_indent = additional_indent) - - def print_frame(self, print_type, frame): - - # Don't allow frames to be exposed if operations level is up - if len(self.ops) >= self.operations_level: - return - - code = getattr(frame, 'code', None) - filename = getattr(code, 'co_filename', "") - lineno = getattr(code, 'co_firstlineno', "") - - s = "<<<<<%s %s @ %s>>>>>>>" % (print_type, filename, lineno) - self.print_line(s) - - def print_bytecode(self, index, bytecode): - # Don't allow bytecodes to be exposed if operations level is up - if len(self.ops) >= self.operations_level: - return 
- - s = "%2d%s%s" % (index, (self.indentor * 2), bytecode) - self.print_line(s) - - - def print_op_enter(self, name, str_args): - s = " " * 17 - s += ">> %s%s" % (name, str_args) - self.print_line_operations(s) - - - def print_op_leave(self, name, str_res): - s = " " * 20 - s += "%s =: %s" % (name, str_res) - self.print_line_operations(s) - - - def print_op_exc(self, name, exc): - s = " " * 17 - s += "x= %s %s" % (name, exc) - self.print_line_operations(s) - - - def print_result(self, space, traceres): - - self.reset() - - for event in traceres.getevents(): - - if isinstance(event, trace.EnterFrame): - frame = event.frame - self.print_frame("enter", frame) - - self.frames.push(frame) - self.frame_count += 1 - - elif isinstance(event, trace.LeaveFrame): - lastframe = self.frames.pop() - self.frame_count -= 1 - - # Reset skip frame count? - if self.frame_count < self.skip_frame_count: - self.skip_frame_count = None - - self.print_frame("leave", lastframe) - - elif isinstance(event, trace.ExecBytecode): - - # Reset skip frame count? 
- if self.frame_count == self.skip_frame_count: - self.skip_frame_count = None - - frame = event.frame - assert (frame == self.frames.top()) - - # Get bytecode from frame - disresult = getdisresult(frame) - bytecode = disresult.getbytecode(event.index) - self.print_bytecode(event.index, bytecode) - - # When operations_level > 1, some bytecodes produce high number of - # operations / bytecodes (usually because they have been written at app - # level) - this hack avoids them recursing on them selves - if bytecode.name in self.skip_bytecodes: - self.print_line("...", 1) - self.skip_frame_count = self.frame_count - - elif isinstance(event, trace.CallBegin): - info = event.callinfo - - self.ops.push(info) - lastframe = self.frames.top() - self.print_op_enter(info.name, repr_args(space, lastframe, info.args)) - self.frame_count += 1 - - elif isinstance(event, trace.CallFinished): - info = event.callinfo - - self.frame_count -= 1 - self.print_op_leave(info.name, repr_value(space, event.res)) - - assert self.ops.pop() == event.callinfo - - elif isinstance(event, trace.CallException): - info = event.callinfo - self.frame_count -= 1 - - self.print_op_exc(info.name, event.ex) - - assert self.ops.pop() == event.callinfo - - else: - pass - -print_result = ResultPrinter().print_result - -def repr_value(space, value): - """ representations for debugging purposes """ - res = str(value) - try: - # XXX Sure this won't go down well - didn't really want - # to clutter up the interpeter code - from pypy.interpreter.argument import Arguments - from pypy.interpreter.function import Function, Method - - if isinstance(value, Function): - res = "Function(%s, %s)" % (value.name, value.code) - - if isinstance(value, Method): - res = "Method(%s, %s)" % (value.w_function.name, value.w_function.code) - - except Exception, exc: - pass - - return res[:240] - -def repr_args(space, frame, args): - l = [] - for arg in args: - if frame and space.is_true(space.is_(arg, frame.w_globals)): - 
l.append('w_globals') - elif frame and space.is_true(space.is_(arg, space.w_builtins)): - l.append('w_builtins') - else: - l.append(repr_value(space, arg)) - - return "(" + ", ".join(l) + ")" - - -def perform_trace(tspace, app_func, *args_w, **kwds_w): - from pypy.interpreter.gateway import app2interp - from pypy.interpreter.argument import Arguments - - # Create our function - func_gw = app2interp(app_func) - func = func_gw.get_function(tspace) - w_func = tspace.wrap(func) - args = Arguments(tspace, args_w, kwds_w) - - # Run the func in the trace space and return results - tspace.settrace() - w_result = tspace.call_args(w_func, args) - trace_result = tspace.getresult() - tspace.settrace() - return w_result, trace_result - - -if __name__ == '__main__': - from pypy.tool import option - args = option.process_options(option.get_standard_options(), - option.Options) - - # Create objspace... - space = option.objspace() - - # Wrap up our space, with a trace space - tspace = trace.create_trace_space(space) - - def app_test(x): - count = 0 - for ii in range(x): - count += ii - return count - - res, traceres = perform_trace(tspace, app_test, tspace.wrap(5)) - print_result(tspace, traceres) - - print "Result", res diff --git a/pypy/tool/udir.py b/pypy/tool/udir.py deleted file mode 100644 index 67ea24fa5d..0000000000 --- a/pypy/tool/udir.py +++ /dev/null @@ -1,6 +0,0 @@ -import autopath - -from py.path import local - -udir = local.make_numbered_dir(base='usession-', keep=3) - diff --git a/pypy/tool/uid.py b/pypy/tool/uid.py deleted file mode 100644 index 037980a9ff..0000000000 --- a/pypy/tool/uid.py +++ /dev/null @@ -1,12 +0,0 @@ -import sys -HUGEINT = (sys.maxint + 1L) * 2L - -def uid(obj): - """ - Return the id of an object as an unsigned number so that its hex - representation makes sense - """ - rval = id(obj) - if rval < 0: - rval += HUGEINT - return rval diff --git a/pypy/tool/utestconvert.py b/pypy/tool/utestconvert.py deleted file mode 100644 index 
4ab36eb741..0000000000 --- a/pypy/tool/utestconvert.py +++ /dev/null @@ -1,251 +0,0 @@ -import re -import sys -import parser - -d={} -# d is the dictionary of unittest changes, keyed to the old name -# used by unittest. -# d[old][0] is the new replacement function. -# d[old][1] is the operator you will substitute, or '' if there is none. -# d[old][2] is the possible number of arguments to the unittest -# function. - -# Old Unittest Name new name operator # of args -d['assertRaises'] = ('raises', '', ['Any']) -d['fail'] = ('raise AssertionError', '', [0,1]) -d['assert_'] = ('assert', '', [1,2]) -d['failIf'] = ('assert not', '', [1,2]) -d['assertEqual'] = ('assert', ' ==', [2,3]) -d['failIfEqual'] = ('assert not', ' ==', [2,3]) -d['assertNotEqual'] = ('assert', ' !=', [2,3]) -d['failUnlessEqual'] = ('assert not', ' !=', [2,3]) -d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4]) -d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4]) -d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4]) -d['failUnlessAlmostEquals'] = ('assert not round', ' !=', [2,3,4]) -# PyPy specific -d['assertRaises_w'] = ('self.space.raises_w', '', ['Any']) -d['assertEqual_w'] = ('assert self.space.eq_w','',['Any']) -d['assertNotEqual_w'] = ('assert not self.space.eq_w','',['Any']) -d['failUnless_w'] = ('assert self.space.is_true','',['Any']) -d['failIf_w'] = ('assert not self.space.is_true','',['Any']) - -# the list of synonyms -d['failUnlessRaises'] = d['assertRaises'] -d['failUnless'] = d['assert_'] -d['assertEquals'] = d['assertEqual'] -d['assertNotEquals'] = d['assertNotEqual'] -d['assertAlmostEquals'] = d['assertAlmostEqual'] -d['assertNotAlmostEquals'] = d['assertNotAlmostEqual'] - -# set up the regular expressions we will need -leading_spaces = re.compile(r'^(\s*)') # this never fails - -pat = '' -for k in d.keys(): # this complicated pattern to match all unittests - pat += '|' + r'^(\s*)' + 'self.' 
+ k + r'\(' # \tself.whatever( - -old_names = re.compile(pat[1:]) -linesep='\n' # nobody will really try to convert files not read - # in text mode, will they? - - -def blocksplitter(fp): - '''split a file into blocks that are headed by functions to rename''' - - blocklist = [] - blockstring = '' - - for line in fp: - interesting = old_names.match(line) - if interesting : - if blockstring: - blocklist.append(blockstring) - blockstring = line # reset the block - else: - blockstring += line - - blocklist.append(blockstring) - return blocklist - -def rewrite_utest(block): - '''rewrite every block to use the new utest functions''' - - '''returns the rewritten unittest, unless it ran into problems, - in which case it just returns the block unchanged. - ''' - utest = old_names.match(block) - - if not utest: - return block - - old = utest.group(0).lstrip()[5:-1] # the name we want to replace - new = d[old][0] # the name of the replacement function - op = d[old][1] # the operator you will use , or '' if there is none. - possible_args = d[old][2] # a list of the number of arguments the - # unittest function could possibly take. - - if possible_args == ['Any']: # just rename assertRaises & friends - return re.sub('self.'+old, new, block) - - message_pos = possible_args[-1] - # the remaining unittests can have an optional message to print - # when they fail. It is always the last argument to the function. - - try: - indent, argl, trailer = decompose_unittest(old, block) - - except SyntaxError: # but we couldn't parse it! 
- return block - - argnum = len(argl) - if argnum not in possible_args: - # sanity check - this one isn't real either - return block - - elif argnum == message_pos: - message = argl[-1] - argl = argl[:-1] - else: - message = None - - if argnum is 0 or (argnum is 1 and argnum is message_pos): #unittest fail() - string = '' - if message: - message = ' ' + message - - elif message_pos is 4: # assertAlmostEqual & friends - try: - pos = argl[2].lstrip() - except IndexError: - pos = '7' # default if none is specified - string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op ) - - else: # assert_, assertEquals and all the rest - string = ' ' + op.join(argl) - - if message: - string = string + ',' + message - - return indent + new + string + trailer - -def decompose_unittest(old, block): - '''decompose the block into its component parts''' - - ''' returns indent, arglist, trailer - indent -- the indentation - arglist -- the arguments to the unittest function - trailer -- any extra junk after the closing paren, such as #commment - ''' - - indent = re.match(r'(\s*)', block).group() - pat = re.search('self.' 
+ old + r'\(', block) - - args, trailer = get_expr(block[pat.end():], ')') - arglist = break_args(args, []) - - if arglist == ['']: # there weren't any - return indent, [], trailer - - for i in range(len(arglist)): - try: - parser.expr(arglist[i].lstrip('\t ')) - except SyntaxError: - if i == 0: - arglist[i] = '(' + arglist[i] + ')' - else: - arglist[i] = ' (' + arglist[i] + ')' - - return indent, arglist, trailer - -def break_args(args, arglist): - '''recursively break a string into a list of arguments''' - try: - first, rest = get_expr(args, ',') - if not rest: - return arglist + [first] - else: - return [first] + break_args(rest, arglist) - except SyntaxError: - return arglist + [args] - -def get_expr(s, char): - '''split a string into an expression, and the rest of the string''' - - pos=[] - for i in range(len(s)): - if s[i] == char: - pos.append(i) - if pos == []: - raise SyntaxError # we didn't find the expected char. Ick. - - for p in pos: - # make the python parser do the hard work of deciding which comma - # splits the string into two expressions - try: - parser.expr('(' + s[:p] + ')') - return s[:p], s[p+1:] - except SyntaxError: # It's not an expression yet - pass - raise SyntaxError # We never found anything that worked. - -if __name__ == '__main__': - - from optparse import OptionParser - import sys - - usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]" - optparser = OptionParser(usage) - - def select_output (option, opt, value, optparser, **kw): - if hasattr(optparser, 'output'): - optparser.error( - 'Cannot combine -s -i and -c options. 
Use one only.') - else: - optparser.output = kw['output'] - - optparser.add_option("-s", "--stdout", action="callback", - callback=select_output, - callback_kwargs={'output':'stdout'}, - help="send your output to stdout") - - optparser.add_option("-i", "--inplace", action="callback", - callback=select_output, - callback_kwargs={'output':'inplace'}, - help="overwrite files in place") - - optparser.add_option("-c", "--copy", action="callback", - callback=select_output, - callback_kwargs={'output':'copy'}, - help="copy files ... fn.py --> fn_cp.py") - - options, args = optparser.parse_args() - - output = getattr(optparser, 'output', 'stdout') - - if output in ['inplace', 'copy'] and not args: - optparser.error( - '-i and -c option require at least one filename') - - if not args: - s = '' - for block in blocksplitter(sys.stdin.read()): - s += rewrite_utest(block) - sys.stdout.write(s) - - else: - for infilename in args: # no error checking to see if we can open, etc. - infile = file(infilename) - s = '' - for block in blocksplitter(infile): - s += rewrite_utest(block) - if output == 'inplace': - outfile = file(infilename, 'w+') - elif output == 'copy': # yes, just go clobber any existing .cp - outfile = file (infilename[:-3]+ '_cp.py', 'w+') - else: - outfile = sys.stdout - - outfile.write(s) - - diff --git a/pypy/translator/__init__.py b/pypy/translator/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/translator/__init__.py +++ /dev/null diff --git a/pypy/translator/annrpython.py b/pypy/translator/annrpython.py deleted file mode 100644 index a67fa4bf2b..0000000000 --- a/pypy/translator/annrpython.py +++ /dev/null @@ -1,397 +0,0 @@ -from __future__ import generators - -from types import FunctionType, ClassType -from pypy.annotation import model as annmodel -from pypy.annotation.model import pair -from pypy.annotation.factory import ListFactory, DictFactory, BlockedInference -from pypy.annotation.bookkeeper import Bookkeeper -from 
pypy.objspace.flow.model import Variable, Constant, UndefinedConstant -from pypy.objspace.flow.model import SpaceOperation, FunctionGraph -from pypy.objspace.flow.model import last_exception, last_exc_value -from pypy.interpreter.pycode import cpython_code_signature -from pypy.interpreter.argument import ArgErr - - -class AnnotatorError(Exception): - pass - - -class RPythonAnnotator: - """Block annotator for RPython. - See description in doc/translation/annotation.txt.""" - - def __init__(self, translator=None): - self.translator = translator - self.pendingblocks = {} # map {block: function} - self.bindings = {} # map Variables to SomeValues - self.annotated = {} # set of blocks already seen - self.links_followed = {} # set of links that have ever been followed - self.notify = {} # {block: {positions-to-reflow-from-when-done}} - # --- the following information is recorded for debugging only --- - # --- and only if annotation.model.DEBUG is kept to True - self.why_not_annotated = {} # {block: (exc_type, exc_value, traceback)} - # records the location of BlockedInference - # exceptions that blocked some blocks. 
- self.blocked_functions = {} # set of functions that have blocked blocks - self.bindingshistory = {}# map Variables to lists of SomeValues - self.binding_caused_by = {} # map Variables to position_keys - # records the caller position that caused bindings of inputargs - # to be updated - self.binding_cause_history = {} # map Variables to lists of positions - # history of binding_caused_by, kept in sync with - # bindingshistory - # --- end of debugging information --- - self.bookkeeper = Bookkeeper(self) - - #___ convenience high-level interface __________________ - - def build_types(self, func_or_flowgraph, input_arg_types, func=None): - """Recursively build annotations about the specific entry point.""" - if isinstance(func_or_flowgraph, FunctionGraph): - flowgraph = func_or_flowgraph - else: - func = func_or_flowgraph - if self.translator is None: - from pypy.translator.translator import Translator - self.translator = Translator(func, simplifying=True) - self.translator.annotator = self - flowgraph = self.translator.getflowgraph(func) - # make input arguments and set their type - input_arg_types = list(input_arg_types) - nbarg = len(flowgraph.getargs()) - while len(input_arg_types) < nbarg: - input_arg_types.append(object) - inputcells = [] - for t in input_arg_types: - if not isinstance(t, annmodel.SomeObject): - t = self.bookkeeper.valueoftype(t) - inputcells.append(t) - - # register the entry point - self.addpendingblock(func, flowgraph.startblock, inputcells) - # recursively proceed until no more pending block is left - self.complete() - return self.binding(flowgraph.getreturnvar()) - - def gettype(self, variable): - """Return the known type of a control flow graph variable, - defaulting to 'object'.""" - if isinstance(variable, Constant): - return type(variable.value) - elif isinstance(variable, Variable): - cell = self.bindings.get(variable) - if cell: - return cell.knowntype - else: - return object - else: - raise TypeError, ("Variable or Constant instance 
expected, " - "got %r" % (variable,)) - - def getuserclasses(self): - """Return a set of known user classes.""" - return self.bookkeeper.userclasses - - def getuserclassdefinitions(self): - """Return a list of ClassDefs.""" - return self.bookkeeper.userclasseslist - - def getuserattributes(self, cls): - """Enumerate the attributes of the given user class, as Variable()s.""" - clsdef = self.bookkeeper.userclasses[cls] - for attr, s_value in clsdef.attrs.items(): - v = Variable(name=attr) - self.bindings[v] = s_value - yield v - - #___ medium-level interface ____________________________ - - def addpendingblock(self, fn, block, cells, called_from=None): - """Register an entry point into block with the given input cells.""" - assert self.translator is None or fn in self.translator.flowgraphs - for a in cells: - assert isinstance(a, annmodel.SomeObject) - if block not in self.annotated: - self.bindinputargs(block, cells, called_from) - else: - self.mergeinputargs(block, cells, called_from) - if not self.annotated[block]: - self.pendingblocks[block] = fn - - def complete(self): - """Process pending blocks until none is left.""" - while self.pendingblocks: - block, fn = self.pendingblocks.popitem() - self.processblock(fn, block) - if False in self.annotated.values(): - if annmodel.DEBUG: - for block in self.annotated: - if self.annotated[block] is False: - fn = self.why_not_annotated[block][1].break_at[0] - self.blocked_functions[fn] = True - import traceback - print '-+' * 30 - print 'BLOCKED block at:', - print self.why_not_annotated[block][1].break_at - print 'because of:' - traceback.print_exception(*self.why_not_annotated[block]) - print '-+' * 30 - print - print "++-" * 20 - print ('%d blocks are still blocked' % - self.annotated.values().count(False)) - print "continuing anyway ...." - print "++-" * 20 - - - def binding(self, arg, in_link=None): - "Gives the SomeValue corresponding to the given Variable or Constant." 
- if isinstance(arg, Variable): - return self.bindings[arg] - elif isinstance(arg, UndefinedConstant): # undefined local variables - return annmodel.SomeImpossibleValue() - elif isinstance(arg, Constant): - if arg.value is last_exception or arg.value is last_exc_value: - assert in_link - assert isinstance(in_link.exitcase, type(Exception)) - assert issubclass(in_link.exitcase, Exception) - return annmodel.SomeObject() # XXX - return self.bookkeeper.immutablevalue(arg.value) - else: - raise TypeError, 'Variable or Constant expected, got %r' % (arg,) - - def setbinding(self, arg, s_value, called_from=None): - if arg in self.bindings: - assert s_value.contains(self.bindings[arg]) - # for debugging purposes, record the history of bindings that - # have been given to this variable - if annmodel.DEBUG: - history = self.bindingshistory.setdefault(arg, []) - history.append(self.bindings[arg]) - cause_history = self.binding_cause_history.setdefault(arg, []) - cause_history.append(self.binding_caused_by[arg]) - self.bindings[arg] = s_value - if annmodel.DEBUG: - self.binding_caused_by[arg] = called_from - - - #___ interface for annotator.factory _______ - - def recursivecall(self, func, position_key, args): - parent_fn, parent_block, parent_index = position_key - graph = self.translator.getflowgraph(func, parent_fn, - position_key) - # self.notify[graph.returnblock] is a dictionary of call - # points to this func which triggers a reflow whenever the - # return block of this graph has been analysed. 
- callpositions = self.notify.setdefault(graph.returnblock, {}) - callpositions[position_key] = True - - # parse the arguments according to the function we are calling - signature = cpython_code_signature(func.func_code) - defs_s = [] - if func.func_defaults: - for x in func.func_defaults: - defs_s.append(self.bookkeeper.immutablevalue(x)) - try: - inputcells = args.match_signature(signature, defs_s) - except ArgErr, e: - print 'IGNORED', e # hopefully temporary hack - return annmodel.SomeImpossibleValue() - - # generalize the function's input arguments - self.addpendingblock(func, graph.startblock, inputcells, position_key) - - # get the (current) return value - v = graph.getreturnvar() - try: - return self.bindings[v] - except KeyError: - # let's see if the graph only has exception returns - if graph.hasonlyexceptionreturns(): - # XXX for functions with exceptions what to - # do anyway? - return self.bookkeeper.immutablevalue(None) - return annmodel.SomeImpossibleValue() - - def reflowfromposition(self, position_key): - fn, block, index = position_key - self.reflowpendingblock(fn, block) - - - #___ simplification (should be moved elsewhere?) _______ - - # it should be! - # now simplify_calls is moved to transform.py. - # i kept reverse_binding here for future(?) purposes though. --sanxiyn - - def reverse_binding(self, known_variables, cell): - """This is a hack.""" - # In simplify_calls, when we are trying to create the new - # SpaceOperation, all we have are SomeValues. But SpaceOperations take - # Variables, not SomeValues. Trouble is, we don't always have a - # Variable that just happens to be bound to the given SomeValue. - # A typical example would be if the tuple of arguments was created - # from another basic block or even another function. Well I guess - # there is no clean solution, short of making the transformations - # more syntactic (e.g. replacing a specific sequence of SpaceOperations - # with another one). 
This is a real hack because we have to use - # the identity of 'cell'. - if cell.is_constant(): - return Constant(cell.const) - else: - for v in known_variables: - if self.bindings[v] is cell: - return v - else: - raise CannotSimplify - - def simplify(self): - # Generic simplifications - from pypy.translator import transform - transform.transform_graph(self) - from pypy.translator import simplify - for graph in self.translator.flowgraphs.values(): - simplify.eliminate_empty_blocks(graph) - - - #___ flowing annotations in blocks _____________________ - - def processblock(self, fn, block): - # Important: this is not called recursively. - # self.flowin() can only issue calls to self.addpendingblock(). - # The analysis of a block can be in three states: - # * block not in self.annotated: - # never seen the block. - # * self.annotated[block] == False: - # the input variables of the block are in self.bindings but we - # still have to consider all the operations in the block. - # * self.annotated[block] == True or <original function object>: - # analysis done (at least until we find we must generalize the - # input variables). - - #print '* processblock', block, cells - self.annotated[block] = fn or True - try: - self.flowin(fn, block) - except BlockedInference, e: - #print '_'*60 - #print 'Blocked at %r:' % (e.break_at,) - #import traceback, sys - #traceback.print_tb(sys.exc_info()[2]) - self.annotated[block] = False # failed, hopefully temporarily - if annmodel.DEBUG: - import sys - self.why_not_annotated[block] = sys.exc_info() - except Exception, e: - # hack for debug tools only - if not hasattr(e, '__annotator_block'): - setattr(e, '__annotator_block', block) - raise - - def reflowpendingblock(self, fn, block): - self.pendingblocks[block] = fn - assert block in self.annotated - self.annotated[block] = False # must re-flow - - def bindinputargs(self, block, inputcells, called_from=None): - # Create the initial bindings for the input args of a block. 
- assert len(block.inputargs) == len(inputcells) - for a, cell in zip(block.inputargs, inputcells): - self.setbinding(a, cell, called_from) - self.annotated[block] = False # must flowin. - - def mergeinputargs(self, block, inputcells, called_from=None): - # Merge the new 'cells' with each of the block's existing input - # variables. - oldcells = [self.binding(a) for a in block.inputargs] - unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] - # if the merged cells changed, we must redo the analysis - if unions != oldcells: - self.bindinputargs(block, unions, called_from) - - def flowin(self, fn, block): - #print 'Flowing', block, [self.binding(a) for a in block.inputargs] - for i in range(len(block.operations)): - try: - self.bookkeeper.enter((fn, block, i)) - self.consider_op(block.operations[i]) - finally: - self.bookkeeper.leave() - # dead code removal: don't follow all exits if the exitswitch is known - exits = block.exits - if isinstance(block.exitswitch, Variable): - s_exitswitch = self.bindings[block.exitswitch] - if s_exitswitch.is_constant(): - exits = [link for link in exits - if link.exitcase == s_exitswitch.const] - knownvar, knownvarvalue = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", (None, None)) - for link in exits: - self.links_followed[link] = True - cells = [] - for a in link.args: - cell = self.binding(a, in_link=link) - if link.exitcase is True and a is knownvar \ - and not knownvarvalue.contains(cell): - cell = knownvarvalue - cells.append(cell) - self.addpendingblock(fn, link.target, cells) - if block in self.notify: - # reflow from certain positions when this block is done - for position_key in self.notify[block]: - self.reflowfromposition(position_key) - - - #___ creating the annotations based on operations ______ - - def consider_op(self,op): - argcells = [self.binding(a) for a in op.args] - consider_meth = getattr(self,'consider_op_'+op.opname, - self.default_consider_op) - resultcell = 
consider_meth(*argcells) - if resultcell is None: - resultcell = annmodel.SomeImpossibleValue() # no return value - elif resultcell == annmodel.SomeImpossibleValue(): - raise BlockedInference # the operation cannot succeed - assert isinstance(resultcell, annmodel.SomeObject) - assert isinstance(op.result, Variable) - self.setbinding(op.result, resultcell) # bind resultcell to op.result - - def default_consider_op(self, *args): - return annmodel.SomeObject() - - def _registeroperations(loc): - # All unary operations - for opname in annmodel.UNARY_OPERATIONS: - exec """ -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname) in globals(), loc - # All binary operations - for opname in annmodel.BINARY_OPERATIONS: - exec """ -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname) in globals(), loc - - _registeroperations(locals()) - del _registeroperations - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - factory = self.bookkeeper.getfactory(ListFactory) - for a in args: - factory.generalize(a) - return factory.create() - - def consider_op_newdict(self, *args): - assert not args, "XXX only supports newdict([])" - factory = self.bookkeeper.getfactory(DictFactory) - return factory.create() - - -class CannotSimplify(Exception): - pass diff --git a/pypy/translator/autopath.py b/pypy/translator/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. 
- -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/genc.h b/pypy/translator/genc.h deleted file mode 100644 index 5e0adf76cc..0000000000 --- a/pypy/translator/genc.h +++ /dev/null @@ -1,797 +0,0 @@ - -/************************************************************/ -/*** Generic C header section ***/ - -#include "Python.h" -#include "compile.h" -#include "frameobject.h" -#include "structmember.h" -#include "traceback.h" - -#if !defined(MIN) -#define MIN(a,b) (((a)<(b))?(a):(b)) -#endif /* MIN */ - -static PyObject *this_module_globals; - -/* Set this if you want call trace frames to be built */ -#if 0 -#define USE_CALL_TRACE -#endif - -#if 0 -#define OBNOXIOUS_PRINT_STATEMENTS -#endif - -#define op_bool(r,err,what) { \ - int _retval = what; \ - if (_retval < 0) { \ - FAIL(err) \ - } \ - r = PyBool_FromLong(_retval); \ - } - -#define op_richcmp(x,y,r,err,dir) \ - if (!(r=PyObject_RichCompare(x,y,dir))) FAIL(err) -#define OP_LT(x,y,r,err) op_richcmp(x,y,r,err, Py_LT) -#define OP_LE(x,y,r,err) 
op_richcmp(x,y,r,err, Py_LE) -#define OP_EQ(x,y,r,err) op_richcmp(x,y,r,err, Py_EQ) -#define OP_NE(x,y,r,err) op_richcmp(x,y,r,err, Py_NE) -#define OP_GT(x,y,r,err) op_richcmp(x,y,r,err, Py_GT) -#define OP_GE(x,y,r,err) op_richcmp(x,y,r,err, Py_GE) - -#define OP_IS_(x,y,r,err) op_bool(r,err,(x == y)) - -#define OP_IS_TRUE(x,r,err) op_bool(r,err,PyObject_IsTrue(x)) - -#define OP_LEN(x,r,err) { \ - int _retval = PyObject_Size(x); \ - if (_retval < 0) { \ - FAIL(err) \ - } \ - r = PyInt_FromLong(_retval); \ - } -#define OP_NEG(x,r,err) if (!(r=PyNumber_Negative(x))) FAIL(err) -#define OP_POS(x,r,err) if (!(r=PyNumber_Positive(x))) FAIL(err) -#define OP_INVERT(x,r,err) if (!(r=PyNumber_Invert(x))) FAIL(err) - -#define OP_ADD(x,y,r,err) if (!(r=PyNumber_Add(x,y))) FAIL(err) -#define OP_SUB(x,y,r,err) if (!(r=PyNumber_Subtract(x,y))) FAIL(err) -#define OP_MUL(x,y,r,err) if (!(r=PyNumber_Multiply(x,y))) FAIL(err) -#define OP_TRUEDIV(x,y,r,err) if (!(r=PyNumber_TrueDivide(x,y))) FAIL(err) -#define OP_FLOORDIV(x,y,r,err) if (!(r=PyNumber_FloorDivide(x,y)))FAIL(err) -#define OP_DIV(x,y,r,err) if (!(r=PyNumber_Divide(x,y))) FAIL(err) -#define OP_MOD(x,y,r,err) if (!(r=PyNumber_Remainder(x,y))) FAIL(err) -#define OP_POW(x,y,z,r,err) if (!(r=PyNumber_Power(x,y,z))) FAIL(err) -#define OP_LSHIFT(x,y,r,err) if (!(r=PyNumber_Lshift(x,y))) FAIL(err) -#define OP_RSHIFT(x,y,r,err) if (!(r=PyNumber_Rshift(x,y))) FAIL(err) -#define OP_AND_(x,y,r,err) if (!(r=PyNumber_And(x,y))) FAIL(err) -#define OP_OR_(x,y,r,err) if (!(r=PyNumber_Or(x,y))) FAIL(err) -#define OP_XOR(x,y,r,err) if (!(r=PyNumber_Xor(x,y))) FAIL(err) - -#define OP_INPLACE_ADD(x,y,r,err) if (!(r=PyNumber_InPlaceAdd(x,y))) \ - FAIL(err) -#define OP_INPLACE_SUB(x,y,r,err) if (!(r=PyNumber_InPlaceSubtract(x,y))) \ - FAIL(err) -#define OP_INPLACE_MUL(x,y,r,err) if (!(r=PyNumber_InPlaceMultiply(x,y))) \ - FAIL(err) -#define OP_INPLACE_TRUEDIV(x,y,r,err) if (!(r=PyNumber_InPlaceTrueDivide(x,y)))\ - FAIL(err) -#define 
OP_INPLACE_FLOORDIV(x,y,r,err)if(!(r=PyNumber_InPlaceFloorDivide(x,y)))\ - FAIL(err) -#define OP_INPLACE_DIV(x,y,r,err) if (!(r=PyNumber_InPlaceDivide(x,y))) \ - FAIL(err) -#define OP_INPLACE_MOD(x,y,r,err) if (!(r=PyNumber_InPlaceRemainder(x,y))) \ - FAIL(err) -#define OP_INPLACE_POW(x,y,r,err) if (!(r=PyNumber_InPlacePower(x,y,Py_None))) \ - FAIL(err) -#define OP_INPLACE_LSHIFT(x,y,r,err) if (!(r=PyNumber_InPlaceLshift(x,y))) \ - FAIL(err) -#define OP_INPLACE_RSHIFT(x,y,r,err) if (!(r=PyNumber_InPlaceRshift(x,y))) \ - FAIL(err) -#define OP_INPLACE_AND(x,y,r,err) if (!(r=PyNumber_InPlaceAnd(x,y))) \ - FAIL(err) -#define OP_INPLACE_OR(x,y,r,err) if (!(r=PyNumber_InPlaceOr(x,y))) \ - FAIL(err) -#define OP_INPLACE_XOR(x,y,r,err) if (!(r=PyNumber_InPlaceXor(x,y))) \ - FAIL(err) - -#define OP_GETITEM(x,y,r,err) if (!(r=PyObject_GetItem1(x,y))) FAIL(err) -#define OP_SETITEM(x,y,z,r,err) if ((PyObject_SetItem1(x,y,z))<0) FAIL(err) \ - r=Py_None; Py_INCREF(r); -#define OP_DELITEM(x,y,r,err) if ((PyObject_DelItem(x,y))<0) FAIL(err) \ - r=Py_None; Py_INCREF(r); -#define OP_CONTAINS(x,y,r,err) op_bool(r,err,(PySequence_Contains(x,y))) - -#define OP_GETATTR(x,y,r,err) if (!(r=PyObject_GetAttr(x,y))) FAIL(err) -#define OP_SETATTR(x,y,z,r,err) if ((PyObject_SetAttr(x,y,z))<0) FAIL(err) \ - r=Py_None; Py_INCREF(r); -#define OP_DELATTR(x,y,r,err) if ((PyObject_SetAttr(x,y,NULL))<0)FAIL(err) \ - r=Py_None; Py_INCREF(r); - -#define OP_NEWSLICE(x,y,z,r,err) if (!(r=PySlice_New(x,y,z))) FAIL(err) - -#define OP_GETSLICE(x,y,z,r,err) { \ - PyObject *__yo = y, *__zo = z; \ - int __y = 0, __z = INT_MAX; \ - if (__yo == Py_None) __yo = NULL; \ - if (__zo == Py_None) __zo = NULL; \ - if (!_PyEval_SliceIndex(__yo, &__y) || \ - !_PyEval_SliceIndex(__zo, &__z) || \ - !(r=PySequence_GetSlice(x, __y, __z))) FAIL(err) \ - } - -#define OP_ALLOC_AND_SET(x,y,r,err) { \ - /* XXX check for long/int overflow */ \ - int __i, __x = PyInt_AsLong(x); \ - if (PyErr_Occurred()) FAIL(err) \ - if (!(r = 
PyList_New(__x))) FAIL(err) \ - for (__i=0; __i<__x; __i++) { \ - Py_INCREF(y); \ - PyList_SET_ITEM(r, __i, y); \ - } \ - } - -#define OP_ITER(x,r,err) if (!(r=PyObject_GetIter(x))) FAIL(err) -#define OP_NEXT(x,r,err) if (!(r=PyIter_Next(x))) { \ - if (!PyErr_Occurred()) PyErr_SetNone(PyExc_StopIteration); \ - FAIL(err) \ - } - -#define OP_SIMPLE_CALL(args,r,err) if (!(r=PyObject_CallFunctionObjArgs args)) \ - FAIL(err) -#define OP_CALL_ARGS(args,r,err) if (!(r=CallWithShape args)) FAIL(err) - -/* Needs to act like getattr(x, '__class__', type(x)) */ -#define OP_TYPE(x,r,err) { \ - PyObject *o = x; \ - if (PyInstance_Check(o)) { \ - r = (PyObject*)(((PyInstanceObject*)o)->in_class); \ - } else { \ - r = (PyObject*)o->ob_type; \ - } \ - Py_INCREF(r); \ - } - -/* Needs to act like instance(x,y) */ -#define OP_ISSUBTYPE(x,y,r,err) \ - op_bool(r,err,PyClass_IsSubclass(x, y)) - -/*** tests ***/ - -#define EQ_False(o) (o == Py_False) -#define EQ_True(o) (o == Py_True) -#define EQ_0(o) (PyInt_Check(o) && PyInt_AS_LONG(o)==0) -#define EQ_1(o) (PyInt_Check(o) && PyInt_AS_LONG(o)==1) - - -/*** misc ***/ - -#define MOVE(x, y) y = x; - -#define INITCHK(expr) if (!(expr)) return; -#define REGISTER_GLOBAL(name) Py_INCREF(name); PyModule_AddObject(m, #name, name); - - -/*** classes ***/ - -/*#define SETUP_CLASS(t, name, base) \ - t = PyObject_CallFunction((PyObject*) &PyType_Type, \ - "s(O){}", name, base)*/ - -#define SETUP_CLASS_ATTR(t, attr, value) \ - (PyObject_SetAttrString(t, attr, value) >= 0) - -/*** instances ***/ - -#define SETUP_INSTANCE_ATTR(t, attr, value) \ - (PyObject_SetAttrString(t, attr, value) >= 0) - -#define SETUP_INSTANCE(i, cls) \ - (PyType_Check(cls) ? 
\ - (i = PyType_GenericAlloc((PyTypeObject *)cls, 0)) : \ - (i = PyInstance_NewRaw(cls, NULL))) - - -#if defined(USE_CALL_TRACE) - -#define TRACE_CALL __f, __tstate, -#define TRACE_ARGS PyFrameObject *__f, PyThreadState *__tstate, -#define TRACE_CALL_VOID __f, __tstate -#define TRACE_ARGS_VOID PyFrameObject *__f, PyThreadState *__tstate - -#define FAIL(err) { __f->f_lineno = __f->f_code->co_firstlineno = __LINE__; goto err; } - -#define FUNCTION_HEAD(signature, self, args, names, file, line) \ - PyThreadState *__tstate = PyThreadState_GET(); \ - PyObject *__localnames = PyList_CrazyStringPack names; \ - PyFrameObject *__f = traced_function_head(self, args, signature, file, line, __tstate, __localnames); - -#define FUNCTION_CHECK() \ - assert (__f != NULL); - -#define ERR_DECREF(arg) { if (__f->f_locals) { PyDict_SetItemString(__f->f_locals, #arg, arg); } Py_DECREF(arg); } - -#define FUNCTION_RETURN(rval) return traced_function_tail(rval, __f, __tstate); - -#else /* !defined(USE_CALL_TRACE) */ - -#define TRACE_CALL /* nothing */ -#define TRACE_ARGS /* nothing */ -#define TRACE_CALL_VOID /* nothing */ -#define TRACE_ARGS_VOID void - -#define FAIL(err) { goto err; } - -#define FUNCTION_HEAD(signature, self, args, names, file, line) - -#define ERR_DECREF(arg) { Py_DECREF(arg); } - -#define FUNCTION_CHECK() - -#define FUNCTION_RETURN(rval) return rval; - -#endif /* defined(USE_CALL_TRACE) */ - - - - - -/* we need a subclass of 'builtin_function_or_method' which can be used - as methods: builtin function objects that can be bound on instances */ -static PyObject * -gencfunc_descr_get(PyObject *func, PyObject *obj, PyObject *type) -{ - if (obj == Py_None) - obj = NULL; - return PyMethod_New(func, obj, type); -} -static PyTypeObject PyGenCFunction_Type = { - PyObject_HEAD_INIT(NULL) - 0, - "pypy_generated_function", - sizeof(PyCFunctionObject), - 0, - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr 
*/ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - /*&PyCFunction_Type set below*/ 0, /* tp_base */ - 0, /* tp_dict */ - gencfunc_descr_get, /* tp_descr_get */ - 0, /* tp_descr_set */ -}; - -#define MODULE_INITFUNC(modname) \ - static PyMethodDef no_methods[] = { (char *)NULL, (PyCFunction)NULL }; \ - void init##modname(void) - -#define SETUP_MODULE(modname) \ - PyObject *m = Py_InitModule(#modname, no_methods); \ - PyModule_AddStringConstant(m, "__sourcefile__", __FILE__); \ - this_module_globals = PyModule_GetDict(m); \ - PyGenCFunction_Type.tp_base = &PyCFunction_Type; \ - PyType_Ready(&PyGenCFunction_Type); - - -/*** operations with a variable number of arguments ***/ - -#define OP_NEWLIST0(r,err) if (!(r=PyList_New(0))) FAIL(err) -#define OP_NEWLIST(args,r,err) if (!(r=PyList_Pack args)) FAIL(err) -#define OP_NEWDICT0(r,err) if (!(r=PyDict_New())) FAIL(err) -#define OP_NEWDICT(args,r,err) if (!(r=PyDict_Pack args)) FAIL(err) -#define OP_NEWTUPLE(args,r,err) if (!(r=PyTuple_Pack args)) FAIL(err) - -static PyObject* PyList_Pack(int n, ...) -{ - int i; - PyObject *o; - PyObject *result; - va_list vargs; - - va_start(vargs, n); - result = PyList_New(n); - if (result == NULL) { - return NULL; - } - for (i = 0; i < n; i++) { - o = va_arg(vargs, PyObject *); - Py_INCREF(o); - PyList_SET_ITEM(result, i, o); - } - va_end(vargs); - return result; -} - -static PyObject* PyDict_Pack(int n, ...) 
-{ - int i; - PyObject *key, *val; - PyObject *result; - va_list vargs; - - va_start(vargs, n); - result = PyDict_New(); - if (result == NULL) { - return NULL; - } - for (i = 0; i < n; i++) { - key = va_arg(vargs, PyObject *); - val = va_arg(vargs, PyObject *); - if (PyDict_SetItem(result, key, val) < 0) { - Py_DECREF(result); - return NULL; - } - } - va_end(vargs); - return result; -} - -#if PY_VERSION_HEX < 0x02040000 /* 2.4 */ -static PyObject* PyTuple_Pack(int n, ...) -{ - int i; - PyObject *o; - PyObject *result; - PyObject **items; - va_list vargs; - - va_start(vargs, n); - result = PyTuple_New(n); - if (result == NULL) { - return NULL; - } - items = ((PyTupleObject *)result)->ob_item; - for (i = 0; i < n; i++) { - o = va_arg(vargs, PyObject *); - Py_INCREF(o); - items[i] = o; - } - va_end(vargs); - return result; -} -#endif - -#if PY_VERSION_HEX >= 0x02030000 /* 2.3 */ -# define PyObject_GetItem1 PyObject_GetItem -# define PyObject_SetItem1 PyObject_SetItem -#else -/* for Python 2.2 only */ -static PyObject* PyObject_GetItem1(PyObject* obj, PyObject* index) -{ - int start, stop, step; - if (!PySlice_Check(index)) { - return PyObject_GetItem(obj, index); - } - if (((PySliceObject*) index)->start == Py_None) { - start = -INT_MAX-1; - } else { - start = PyInt_AsLong(((PySliceObject*) index)->start); - if (start == -1 && PyErr_Occurred()) { - return NULL; - } - } - if (((PySliceObject*) index)->stop == Py_None) { - stop = INT_MAX; - } else { - stop = PyInt_AsLong(((PySliceObject*) index)->stop); - if (stop == -1 && PyErr_Occurred()) { - return NULL; - } - } - if (((PySliceObject*) index)->step != Py_None) { - step = PyInt_AsLong(((PySliceObject*) index)->step); - if (step == -1 && PyErr_Occurred()) { - return NULL; - } - if (step != 1) { - PyErr_SetString(PyExc_ValueError, - "obj[slice]: no step allowed"); - return NULL; - } - } - return PySequence_GetSlice(obj, start, stop); -} -static PyObject* PyObject_SetItem1(PyObject* obj, PyObject* index, PyObject* v) -{ 
- int start, stop, step; - if (!PySlice_Check(index)) { - return PyObject_SetItem(obj, index, v); - } - if (((PySliceObject*) index)->start == Py_None) { - start = -INT_MAX-1; - } else { - start = PyInt_AsLong(((PySliceObject*) index)->start); - if (start == -1 && PyErr_Occurred()) { - return NULL; - } - } - if (((PySliceObject*) index)->stop == Py_None) { - stop = INT_MAX; - } else { - stop = PyInt_AsLong(((PySliceObject*) index)->stop); - if (stop == -1 && PyErr_Occurred()) { - return NULL; - } - } - if (((PySliceObject*) index)->step != Py_None) { - step = PyInt_AsLong(((PySliceObject*) index)->step); - if (step == -1 && PyErr_Occurred()) { - return NULL; - } - if (step != 1) { - PyErr_SetString(PyExc_ValueError, - "obj[slice]: no step allowed"); - return NULL; - } - } - return PySequence_SetSlice(obj, start, stop, v); -} -#endif - -static PyObject* skipped(PyObject* self, PyObject* args) -{ - PyErr_Format(PyExc_AssertionError, - "calling the skipped function '%s'", - (((PyCFunctionObject *)self) -> m_ml -> ml_name)); - return NULL; -} - -static PyObject* CallWithShape(PyObject* callable, PyObject* shape, ...) -{ - /* XXX the 'shape' argument is a tuple as specified by - XXX pypy.interpreter.argument.fromshape(). This code should - XXX we made independent on the format of the 'shape' later... 
*/ - PyObject* result = NULL; - PyObject* t = NULL; - PyObject* d = NULL; - PyObject* o; - PyObject* key; - PyObject* t2; - int i, nargs, nkwds, nvarargs, starflag; - va_list vargs; - - if (!PyTuple_Check(shape) || - PyTuple_GET_SIZE(shape) != 3 || - !PyInt_Check(PyTuple_GET_ITEM(shape, 0)) || - !PyTuple_Check(PyTuple_GET_ITEM(shape, 1)) || - !PyInt_Check(PyTuple_GET_ITEM(shape, 2))) { - Py_FatalError("in genc.h: invalid 'shape' argument"); - } - nargs = PyInt_AS_LONG(PyTuple_GET_ITEM(shape, 0)); - nkwds = PyTuple_GET_SIZE(PyTuple_GET_ITEM(shape, 1)); - starflag = PyInt_AS_LONG(PyTuple_GET_ITEM(shape, 2)); - - va_start(vargs, shape); - t = PyTuple_New(nargs); - if (t == NULL) - goto finally; - for (i = 0; i < nargs; i++) { - o = va_arg(vargs, PyObject *); - Py_INCREF(o); - PyTuple_SET_ITEM(t, i, o); - } - if (nkwds) { - d = PyDict_New(); - if (d == NULL) - goto finally; - for (i = 0; i < nkwds; i++) { - o = va_arg(vargs, PyObject *); - key = PyTuple_GET_ITEM(PyTuple_GET_ITEM(shape, 1), i); - if (PyDict_SetItem(d, key, o) < 0) - goto finally; - } - } - if (starflag) { - o = va_arg(vargs, PyObject *); - o = PySequence_Tuple(o); - if (o == NULL) - goto finally; - t2 = PySequence_Concat(t, o); - Py_DECREF(o); - Py_DECREF(t); - t = t2; - if (t == NULL) - goto finally; - } - va_end(vargs); - - result = PyObject_Call(callable, t, d); - - finally: - Py_XDECREF(d); - Py_XDECREF(t); - return result; -} - - -#if defined(USE_CALL_TRACE) - -static int callstack_depth = -1; -static PyCodeObject* getcode(char *func_name, char *func_filename, int lineno); -static int trace_frame(PyThreadState *tstate, PyFrameObject *f, int code, PyObject *val); -static int trace_frame_exc(PyThreadState *tstate, PyFrameObject *f); - -static int -trace_frame(PyThreadState *tstate, PyFrameObject *f, int code, PyObject *val) -{ - int result = 0; - if (!tstate->use_tracing || tstate->tracing) { - /*printf("if (!tstate->use_tracing || tstate->tracing)\n");*/ - return 0; - } - if (tstate->c_profilefunc 
!= NULL) { - /*printf("if (tstate->c_profilefunc != NULL)\n");*/ - tstate->tracing++; - result = tstate->c_profilefunc(tstate->c_profileobj, - f, code , val); - tstate->use_tracing = ((tstate->c_tracefunc != NULL) - || (tstate->c_profilefunc != NULL)); - tstate->tracing--; - if (result) { - /*printf(" if (result)\n");*/ - return result; - } - } - if (tstate->c_tracefunc != NULL) { - /*printf("if (tstate->c_tracefunc != NULL)\n");*/ - tstate->tracing++; - result = tstate->c_tracefunc(tstate->c_traceobj, - f, code , val); - tstate->use_tracing = ((tstate->c_tracefunc != NULL) - || (tstate->c_profilefunc != NULL)); - tstate->tracing--; - } - /*printf("return result;\n");*/ - return result; -} - -static int -trace_frame_exc(PyThreadState *tstate, PyFrameObject *f) -{ - PyObject *type, *value, *traceback, *arg; - int err; - - if (tstate->c_tracefunc == NULL) { - return 0; - } - - PyErr_Fetch(&type, &value, &traceback); - if (value == NULL) { - value = Py_None; - Py_INCREF(value); - } - arg = PyTuple_Pack(3, type, value, traceback); - if (arg == NULL) { - PyErr_Restore(type, value, traceback); - return 0; - } - err = trace_frame(tstate, f, PyTrace_EXCEPTION, arg); - Py_DECREF(arg); - if (err == 0) { - PyErr_Restore(type, value, traceback); - } else { - Py_XDECREF(type); - Py_XDECREF(value); - Py_XDECREF(traceback); - } - return err; -} - -static PyCodeObject* -getcode(char *func_name, char *func_filename, int lineno) -{ - PyObject *code = NULL; - PyObject *name = NULL; - PyObject *nulltuple = NULL; - PyObject *filename = NULL; - PyCodeObject *tb_code = NULL; -#if defined(OBNOXIOUS_PRINT_STATEMENTS) - int i; - - printf("%5d: ", lineno); - assert(callstack_depth >= 0); - if (callstack_depth) { - for (i=0; i<callstack_depth; ++i) { - printf(" "); - } - } - printf("%s\n", func_name); -#endif /* !defined(OBNOXIOUS_PRINT_STATEMENTS) */ - - code = PyString_FromString(""); - if (code == NULL) - goto failed; - name = PyString_FromString(func_name); - if (name == NULL) - goto 
failed; - nulltuple = PyTuple_New(0); - if (nulltuple == NULL) - goto failed; - filename = PyString_FromString(func_filename); - tb_code = PyCode_New(0, /* argcount */ - 0, /* nlocals */ - 0, /* stacksize */ - 0, /* flags */ - code, /* code */ - nulltuple, /* consts */ - nulltuple, /* names */ - nulltuple, /* varnames */ - nulltuple, /* freevars */ - nulltuple, /* cellvars */ - filename, /* filename */ - name, /* name */ - lineno, /* firstlineno */ - code /* lnotab */ - ); - if (tb_code == NULL) - goto failed; - Py_DECREF(code); - Py_DECREF(nulltuple); - Py_DECREF(filename); - Py_DECREF(name); - return tb_code; -failed: - Py_XDECREF(code); - Py_XDECREF(name); - return NULL; -} - -static PyFrameObject *traced_function_head(PyObject *function, PyObject *args, char *c_signature, char *filename, int c_lineno, PyThreadState *tstate, PyObject *extra_local_names) { - /* - STEALS a reference to extra_local_names if not NULL - */ - - PyCodeObject *c; - PyFrameObject *f; - PyObject *locals; - PyObject *locals_signature; - PyObject *locals_lineno; - PyObject *locals_filename; - - assert(function && args && tstate); - - locals = PyDict_New(); - locals_signature = PyString_FromString(c_signature); - locals_lineno = PyInt_FromLong(c_lineno); - locals_filename = PyString_FromString(filename); - if (locals == NULL || function == NULL || args == NULL || - locals_signature == NULL || locals_lineno == NULL || - locals_filename == NULL) { - Py_XDECREF(locals); - Py_XDECREF(locals_signature); - Py_XDECREF(locals_lineno); - Py_XDECREF(locals_filename); - return NULL; - } - PyDict_SetItemString(locals, "function", function); - PyDict_SetItemString(locals, "args", args); - PyDict_SetItemString(locals, "signature", locals_signature); - PyDict_SetItemString(locals, "lineno", locals_lineno); - PyDict_SetItemString(locals, "filename", locals_filename); - Py_DECREF(locals_signature); - Py_DECREF(locals_lineno); - Py_DECREF(locals_filename); - if (extra_local_names != NULL) { - int max_locals = 
MIN(PyList_Size(extra_local_names), PyTuple_Size(args)); - int i; - for (i = 0; i < max_locals; ++i) { - PyDict_SetItem(locals, PyList_GET_ITEM(extra_local_names, i), PyTuple_GET_ITEM(args, i)); - } - Py_DECREF(extra_local_names); - } - - callstack_depth++; - c = getcode(c_signature, filename, c_lineno); - if (c == NULL) { - Py_DECREF(locals); - callstack_depth--; - return NULL; - } - f = PyFrame_New(tstate, c, this_module_globals, locals); - if (f == NULL) { - callstack_depth--; - return NULL; - } - Py_DECREF(c); - Py_DECREF(locals); - tstate->frame = f; - if (trace_frame(tstate, f, PyTrace_CALL, Py_None) < 0) { - Py_DECREF(args); - callstack_depth--; - return NULL; - } - - return f; -} - -static PyObject *traced_function_tail(PyObject *rval, PyFrameObject *f, PyThreadState *tstate) { - /* - STEALS a reference to f - */ - if (f == NULL) { - goto bad_args; - } - if (rval == NULL) { - if (tstate->curexc_traceback == NULL) { - PyTraceBack_Here(f); - } - if (trace_frame_exc(tstate, f) < 0) { - goto end; - } - } else { - if (trace_frame(tstate, f, PyTrace_RETURN, rval) < 0) { - Py_DECREF(rval); - rval = NULL; - } - } -end: - tstate->frame = f->f_back; - Py_DECREF(f); -bad_args: - callstack_depth--; - return rval; -} - -static PyObject* PyList_CrazyStringPack(char *begin, ...) 
-{ - PyObject *o; - PyObject *result; - va_list vargs; - - result = PyList_New(0); - if (result == NULL || begin == NULL) { - return result; - } - va_start(vargs, begin); - o = PyString_FromString(begin); - if (o == NULL) { - Py_XDECREF(result); - return NULL; - } - if (PyList_Append(result, o) == -1) { - Py_DECREF(o); - Py_XDECREF(result); - return result; - } - Py_DECREF(o); - while ((begin = va_arg(vargs, char *)) != NULL) { - o = PyString_FromString(begin); - if (o == NULL) { - Py_XDECREF(result); - return NULL; - } - if (PyList_Append(result, o) == -1) { - Py_DECREF(o); - Py_XDECREF(result); - return NULL; - } - Py_DECREF(o); - } - va_end(vargs); - return result; -} - -#endif /* defined(USE_CALL_TRACE) */ diff --git a/pypy/translator/genc.py b/pypy/translator/genc.py deleted file mode 100644 index 3e7a2d43f5..0000000000 --- a/pypy/translator/genc.py +++ /dev/null @@ -1,851 +0,0 @@ -""" -Generate a C source file from the flowmodel. - -""" -from __future__ import generators -import autopath, os, sys -from pypy.objspace.flow.model import Variable, Constant, SpaceOperation -from pypy.objspace.flow.model import FunctionGraph, Block, Link -from pypy.objspace.flow.model import last_exception, last_exc_value -from pypy.objspace.flow.model import traverse, uniqueitems, checkgraph -from pypy.translator.simplify import remove_direct_loops -from pypy.interpreter.pycode import CO_VARARGS -from pypy.annotation import model as annmodel -from types import FunctionType, CodeType - -from pypy.objspace.std.restricted_int import r_int, r_uint - -# ____________________________________________________________ - -def c_string(s): - return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\"'),) - -def uniquemodulename(name, SEEN={}): - # never reuse the same module name within a Python session! 
- i = 0 - while True: - i += 1 - result = '%s_%d' % (name, i) - if result not in SEEN: - SEEN[result] = True - return result - -def go_figure_out_this_name(source): - # ahem - return 'PyRun_String("%s", Py_eval_input, PyEval_GetGlobals(), NULL)' % ( - source, ) - - -class GenC: - MODNAMES = {} - - def __init__(self, f, translator, modname=None, f2=None, f2name=None): - self.f = f - self.f2 = f2 - self.f2name = f2name - self.translator = translator - self.modname = (modname or - uniquemodulename(translator.functions[0].__name__)) - self.cnames = {Constant(None).key: 'Py_None', - Constant(False).key: 'Py_False', - Constant(True).key: 'Py_True', - } - self.seennames = {} - self.initcode = [] # list of lines for the module's initxxx() - self.latercode = [] # list of generators generating extra lines - # for later in initxxx() -- for recursive - # objects - self.globaldecl = [] - self.globalobjects = [] - self.pendingfunctions = [] - self.debugstack = () # linked list of nested nameof() - self.gen_source() - - def nameof(self, obj, debug=None): - key = Constant(obj).key - try: - return self.cnames[key] - except KeyError: - if debug: - stackentry = debug, obj - else: - stackentry = obj - self.debugstack = (self.debugstack, stackentry) - if (type(obj).__module__ != '__builtin__' and - not isinstance(obj, type)): # skip user-defined metaclasses - # assume it's a user defined thingy - name = self.nameof_instance(obj) - else: - for cls in type(obj).__mro__: - meth = getattr(self, - 'nameof_' + cls.__name__.replace(' ', ''), - None) - if meth: - break - else: - raise Exception, "nameof(%r)" % (obj,) - name = meth(obj) - self.debugstack, x = self.debugstack - assert x is stackentry - self.cnames[key] = name - return name - - def uniquename(self, basename): - basename = basename.translate(C_IDENTIFIER) - n = self.seennames.get(basename, 0) - self.seennames[basename] = n+1 - if n == 0: - self.globalobjects.append(basename) - self.globaldecl.append('static PyObject *%s;' % 
(basename,)) - return basename - else: - return self.uniquename('%s_%d' % (basename, n)) - - def nameof_object(self, value): - if type(value) is not object: - raise Exception, "nameof(%r)" % (value,) - name = self.uniquename('g_object') - self.initcode.append('INITCHK(%s = PyObject_CallFunction((PyObject*)&PyBaseObject_Type, ""))'%name) - return name - - def nameof_module(self, value): - assert value is os or not hasattr(value, "__file__") or \ - not (value.__file__.endswith('.pyc') or - value.__file__.endswith('.py') or - value.__file__.endswith('.pyo')), \ - "%r is not a builtin module (probably :)"%value - name = self.uniquename('mod%s'%value.__name__) - self.initcode.append('INITCHK(%s = PyImport_ImportModule("%s"))'%(name, value.__name__)) - return name - - - def nameof_int(self, value): - if value >= 0: - name = 'gint_%d' % value - else: - name = 'gint_minus%d' % abs(value) - name = self.uniquename(name) - self.initcode.append('INITCHK(%s = ' - 'PyInt_FromLong(%d))' % (name, value)) - return name - - def nameof_long(self, value): - assert type(int(value)) is int, "your literal long is too long" - if value >= 0: - name = 'glong%d' % value - else: - name = 'glong_minus%d' % abs(value) - name = self.uniquename(name) - self.initcode.append('INITCHK(%s = ' - 'PyLong_FromLong(%d))' % (name, value)) - return name - - def nameof_float(self, value): - name = 'gfloat_%s' % value - name = (name.replace('-', 'minus') - .replace('.', 'dot')) - name = self.uniquename(name) - self.initcode.append('INITCHK(%s = ' - 'PyFloat_FromDouble(%r))' % (name, value)) - return name - - def nameof_str(self, value): - name = self.uniquename('gstr_' + value[:32]) - if [c for c in value if c<' ' or c>'~' or c=='"' or c=='\\']: - # non-printable string - s = 'chr_%s' % name - self.globaldecl.append('static char %s[] = { %s };' % ( - s, ', '.join(['%d' % ord(c) for c in value]))) - else: - # printable string - s = '"%s"' % value - self.initcode.append('INITCHK(%s = 
PyString_FromStringAndSize(' - '%s, %d))' % (name, s, len(value))) - return name - - def skipped_function(self, func): - # debugging only! Generates a placeholder for missing functions - # that raises an exception when called. - name = self.uniquename('gskippedfunc_' + func.__name__) - self.globaldecl.append('static PyMethodDef ml_%s = { "%s", &skipped, METH_VARARGS };' % (name, name)) - self.initcode.append('INITCHK(%s = PyCFunction_New(' - '&ml_%s, NULL))' % (name, name)) - self.initcode.append('\tPy_INCREF(%s);' % name) - self.initcode.append('\tPyCFunction_GET_SELF(%s) = %s;' % (name, name)) - return name - - def nameof_function(self, func, progress=['-\x08', '\\\x08', - '|\x08', '/\x08']): - printable_name = '(%s:%d) %s' % ( - func.func_globals.get('__name__', '?'), - func.func_code.co_firstlineno, - func.__name__) - if self.translator.frozen: - if func not in self.translator.flowgraphs: - print "NOT GENERATING", printable_name - return self.skipped_function(func) - else: - if (func.func_doc and - func.func_doc.lstrip().startswith('NOT_RPYTHON')): - print "skipped", printable_name - return self.skipped_function(func) - p = progress.pop(0) - sys.stderr.write(p) - progress.append(p) - name = self.uniquename('gfunc_' + func.__name__) - self.initcode.append('INITCHK(%s = PyCFunction_New(' - '&ml_%s, NULL))' % (name, name)) - self.initcode.append('\t%s->ob_type = &PyGenCFunction_Type;' % name) - self.pendingfunctions.append(func) - return name - - def nameof_staticmethod(self, sm): - # XXX XXX XXXX - func = sm.__get__(42.5) - name = self.uniquename('gsm_' + func.__name__) - functionname = self.nameof(func) - self.initcode.append('INITCHK(%s = PyCFunction_New(' - '&ml_%s, NULL))' % (name, functionname)) - return name - - def nameof_instancemethod(self, meth): - if meth.im_self is None: - # no error checking here - return self.nameof(meth.im_func) - else: - ob = self.nameof(meth.im_self) - func = self.nameof(meth.im_func) - typ = self.nameof(meth.im_class) - name = 
self.uniquename('gmeth_'+meth.im_func.__name__) - self.initcode.append( - 'INITCHK(%s = gencfunc_descr_get(%s, %s, %s))'%( - name, func, ob, typ)) - return name - - def should_translate_attr(self, pbc, attr): - ann = self.translator.annotator - if ann is None: - ignore = getattr(pbc.__class__, 'NOT_RPYTHON_ATTRIBUTES', []) - if attr in ignore: - return False - else: - return "probably" # True - classdef = ann.getuserclasses().get(pbc.__class__) - if classdef and classdef.about_attribute(attr) is not None: - return True - return False - - def later(self, gen): - self.latercode.append((gen, self.debugstack)) - - def nameof_instance(self, instance): - name = self.uniquename('ginst_' + instance.__class__.__name__) - cls = self.nameof(instance.__class__) - def initinstance(): - content = instance.__dict__.items() - content.sort() - for key, value in content: - if self.should_translate_attr(instance, key): - yield 'INITCHK(SETUP_INSTANCE_ATTR(%s, "%s", %s))' % ( - name, key, self.nameof(value)) - self.initcode.append('INITCHK(SETUP_INSTANCE(%s, %s))' % ( - name, cls)) - self.later(initinstance()) - return name - - def nameof_builtin_function_or_method(self, func): - if func.__self__ is None: - # builtin function - # where does it come from? 
Python2.2 doesn't have func.__module__ - for modname, module in sys.modules.items(): - if hasattr(module, '__file__'): - if (module.__file__.endswith('.py') or - module.__file__.endswith('.pyc') or - module.__file__.endswith('.pyo')): - continue # skip non-builtin modules - if func is getattr(module, func.__name__, None): - break - else: - raise Exception, '%r not found in any built-in module' % (func,) - name = self.uniquename('gbltin_' + func.__name__) - if modname == '__builtin__': - self.initcode.append('INITCHK(%s = PyMapping_GetItemString(' - 'PyEval_GetBuiltins(), "%s"))' % ( - name, func.__name__)) - else: - self.initcode.append('INITCHK(%s = PyObject_GetAttrString(' - '%s, "%s"))' % ( - name, self.nameof(module), func.__name__)) - else: - # builtin (bound) method - name = self.uniquename('gbltinmethod_' + func.__name__) - self.initcode.append('INITCHK(%s = PyObject_GetAttrString(' - '%s, "%s"))' % ( - name, self.nameof(func.__self__), func.__name__)) - return name - - def nameof_classobj(self, cls): - if cls.__doc__ and cls.__doc__.lstrip().startswith('NOT_RPYTHON'): - raise Exception, "%r should never be reached" % (cls,) - - metaclass = "&PyType_Type" - if issubclass(cls, Exception): - if cls.__module__ == 'exceptions': - return 'PyExc_%s'%cls.__name__ - #else: - # # exceptions must be old-style classes (grr!) - # metaclass = "&PyClass_Type" - # For the moment, use old-style classes exactly when the - # pypy source uses old-style classes, to avoid strange problems. - if not isinstance(cls, type): - assert type(cls) is type(Exception) - metaclass = "&PyClass_Type" - - name = self.uniquename('gcls_' + cls.__name__) - basenames = [self.nameof(base) for base in cls.__bases__] - def initclassobj(): - content = cls.__dict__.items() - content.sort() - for key, value in content: - if key.startswith('__'): - if key in ['__module__', '__doc__', '__dict__', - '__weakref__', '__repr__', '__metaclass__']: - continue - # XXX some __NAMES__ are important... 
nicer solution sought - #raise Exception, "unexpected name %r in class %s"%(key, cls) - if isinstance(value, staticmethod) and value.__get__(1) not in self.translator.flowgraphs and self.translator.frozen: - print value - continue - if isinstance(value, FunctionType) and value not in self.translator.flowgraphs and self.translator.frozen: - print value - continue - - yield 'INITCHK(SETUP_CLASS_ATTR(%s, "%s", %s))' % ( - name, key, self.nameof(value)) - - baseargs = ", ".join(basenames) - if baseargs: - baseargs = ', '+baseargs - self.initcode.append('INITCHK(%s = PyObject_CallFunction((PyObject*) %s,' - %(name, metaclass)) - self.initcode.append('\t\t"s(%s){}", "%s"%s))' - %("O"*len(basenames), cls.__name__, baseargs)) - - self.later(initclassobj()) - return name - - nameof_class = nameof_classobj # for Python 2.2 - - typename_mapping = { - object: '&PyBaseObject_Type', - int: '&PyInt_Type', - long: '&PyLong_Type', - bool: '&PyBool_Type', - list: '&PyList_Type', - tuple: '&PyTuple_Type', - dict: '&PyDict_Type', - str: '&PyString_Type', - float: '&PyFloat_Type', - type(Exception()): '&PyInstance_Type', - type: '&PyType_Type', - complex:'&PyComplex_Type', - unicode:'&PyUnicode_Type', - file: '&PyFile_Type', - type(None): 'Py_None->ob_type', - CodeType: '&PyCode_Type', - - r_int: '&PyInt_Type', - r_uint: '&PyInt_Type', - - # XXX we leak 5 references here, but that's the least of the - # problems with this section of code - # type 'builtin_function_or_method': - type(len): go_figure_out_this_name('type(len)'), - # type 'method_descriptor': - type(list.append): go_figure_out_this_name('type(list.append)'), - # type 'wrapper_descriptor': - type(type(None).__repr__): go_figure_out_this_name( - 'type(type(None).__repr__)'), - # type 'getset_descriptor': - type(type.__dict__['__dict__']): go_figure_out_this_name( - "type(type.__dict__['__dict__'])"), - # type 'member_descriptor': - type(type.__dict__['__basicsize__']): go_figure_out_this_name( - 
"type(type.__dict__['__basicsize__'])"), - } - - def nameof_type(self, cls): - if cls in self.typename_mapping: - return '(PyObject*) %s' % self.typename_mapping[cls] - assert cls.__module__ != '__builtin__', \ - "built-in class %r not found in typename_mapping" % (cls,) - return self.nameof_classobj(cls) - - def nameof_tuple(self, tup): - name = self.uniquename('g%dtuple' % len(tup)) - args = [self.nameof(x) for x in tup] - args.insert(0, '%d' % len(tup)) - args = ', '.join(args) - self.initcode.append('INITCHK(%s = PyTuple_Pack(%s))' % (name, args)) - return name - - def nameof_list(self, lis): - name = self.uniquename('g%dlist' % len(lis)) - def initlist(): - for i in range(len(lis)): - item = self.nameof(lis[i]) - yield '\tPy_INCREF(%s);' % item - yield '\tPyList_SET_ITEM(%s, %d, %s);' % (name, i, item) - self.initcode.append('INITCHK(%s = PyList_New(%d))' % (name, len(lis))) - self.later(initlist()) - return name - - def nameof_dict(self, dic): - assert dic is not __builtins__ - assert '__builtins__' not in dic, 'Seems to be the globals of %s' % ( - dic.get('__name__', '?'),) - name = self.uniquename('g%ddict' % len(dic)) - def initdict(): - for k in dic: - if type(k) is str: - yield ('\tINITCHK(PyDict_SetItemString' - '(%s, "%s", %s) >= 0)'%( - name, k, self.nameof(dic[k]))) - else: - yield ('\tINITCHK(PyDict_SetItem' - '(%s, %s, %s) >= 0)'%( - name, self.nameof(k), self.nameof(dic[k]))) - self.initcode.append('INITCHK(%s = PyDict_New())' % (name,)) - self.later(initdict()) - return name - - # strange prebuilt instances below, don't look too closely - # XXX oh well. 
- def nameof_member_descriptor(self, md): - name = self.uniquename('gdescriptor_%s_%s' % ( - md.__objclass__.__name__, md.__name__)) - cls = self.nameof(md.__objclass__) - self.initcode.append('INITCHK(PyType_Ready((PyTypeObject*) %s) >= 0)' % - cls) - self.initcode.append('INITCHK(%s = PyMapping_GetItemString(' - '((PyTypeObject*) %s)->tp_dict, "%s"))' % - (name, cls, md.__name__)) - return name - nameof_getset_descriptor = nameof_member_descriptor - nameof_method_descriptor = nameof_member_descriptor - nameof_wrapper_descriptor = nameof_member_descriptor - - def nameof_file(self, fil): - if fil is sys.stdin: - return 'PySys_GetObject("stdin")' - if fil is sys.stdout: - return 'PySys_GetObject("stdout")' - if fil is sys.stderr: - return 'PySys_GetObject("stderr")' - raise Exception, 'Cannot translate an already-open file: %r' % (fil,) - - def gen_source(self): - f = self.f - info = { - 'modname': self.modname, - 'entrypointname': self.translator.functions[0].__name__, - 'entrypoint': self.nameof(self.translator.functions[0]), - } - # header - print >> f, self.C_HEADER - - # function implementations - while self.pendingfunctions: - func = self.pendingfunctions.pop() - self.gen_cfunction(func) - # collect more of the latercode after each function - while self.latercode: - gen, self.debugstack = self.latercode.pop() - #self.initcode.extend(gen) -- eats TypeError! bad CPython! 
- for line in gen: - self.initcode.append(line) - self.debugstack = () - self.gen_global_declarations() - - # footer - print >> f, self.C_INIT_HEADER % info - if self.f2name is not None: - print >> f, '#include "%s"' % self.f2name - for codeline in self.initcode: - print >> f, '\t' + codeline - print >> f, self.C_INIT_FOOTER % info - - def gen_global_declarations(self): - g = self.globaldecl - if g: - f = self.f - print >> f, '/* global declaration%s */' % ('s'*(len(g)>1)) - for line in g: - print >> f, line - print >> f - del g[:] - g = self.globalobjects - for name in g: - self.initcode.append('REGISTER_GLOBAL(%s)' % (name,)) - del g[:] - if self.f2 is not None: - for line in self.initcode: - print >> self.f2, line - del self.initcode[:] - - def gen_cfunction(self, func): -## print 'gen_cfunction (%s:%d) %s' % ( -## func.func_globals.get('__name__', '?'), -## func.func_code.co_firstlineno, -## func.__name__) - - f = self.f - body = list(self.cfunction_body(func)) - name_of_defaults = [self.nameof(x, debug=('Default argument of', func)) - for x in (func.func_defaults or ())] - self.gen_global_declarations() - - # print header - cname = self.nameof(func) - assert cname.startswith('gfunc_') - f_name = 'f_' + cname[6:] - - # collect all the local variables - graph = self.translator.getflowgraph(func) - localslst = [] - def visit(node): - if isinstance(node, Block): - localslst.extend(node.getvariables()) - traverse(visit, graph) - localnames = [a.name for a in uniqueitems(localslst)] - - # collect all the arguments - if func.func_code.co_flags & CO_VARARGS: - vararg = graph.getargs()[-1] - positional_args = graph.getargs()[:-1] - else: - vararg = None - positional_args = graph.getargs() - min_number_of_args = len(positional_args) - len(name_of_defaults) - - fast_args = [a.name for a in positional_args] - if vararg is not None: - fast_args.append(str(vararg)) - fast_name = 'fast' + f_name - - fast_set = dict(zip(fast_args, fast_args)) - - declare_fast_args = 
[('PyObject *' + a) for a in fast_args] - if declare_fast_args: - declare_fast_args = 'TRACE_ARGS ' + ', '.join(declare_fast_args) - else: - declare_fast_args = 'TRACE_ARGS_VOID' - fast_function_header = ('static PyObject *\n' - '%s(%s)' % (fast_name, declare_fast_args)) - - print >> f, fast_function_header + ';' # forward - print >> f - - print >> f, 'static PyObject *' - print >> f, '%s(PyObject* self, PyObject* args, PyObject* kwds)' % ( - f_name,) - print >> f, '{' - print >> f, '\tFUNCTION_HEAD(%s, %s, args, %s, __FILE__, __LINE__ - 2)' % ( - c_string('%s(%s)' % (cname, ', '.join(name_of_defaults))), - cname, - '(%s)' % (', '.join(map(c_string, name_of_defaults) + ['NULL']),), - ) - - kwlist = ['"%s"' % name for name in - func.func_code.co_varnames[:func.func_code.co_argcount]] - kwlist.append('0') - print >> f, '\tstatic char* kwlist[] = {%s};' % (', '.join(kwlist),) - - if fast_args: - print >> f, '\tPyObject *%s;' % (', *'.join(fast_args)) - print >> f - - print >> f, '\tFUNCTION_CHECK()' - - # argument unpacking - if vararg is not None: - print >> f, '\t%s = PyTuple_GetSlice(args, %d, INT_MAX);' % ( - vararg, len(positional_args)) - print >> f, '\tif (%s == NULL)' % (vararg,) - print >> f, '\t\tFUNCTION_RETURN(NULL)' - print >> f, '\targs = PyTuple_GetSlice(args, 0, %d);' % ( - len(positional_args),) - print >> f, '\tif (args == NULL) {' - print >> f, '\t\tERR_DECREF(%s)' % (vararg,) - print >> f, '\t\tFUNCTION_RETURN(NULL)' - print >> f, '\t}' - tail = """{ -\t\tERR_DECREF(args) -\t\tERR_DECREF(%s) -\t\tFUNCTION_RETURN(NULL); -\t} -\tPy_DECREF(args);""" % vararg - else: - tail = '\n\t\tFUNCTION_RETURN(NULL)' - for i in range(len(name_of_defaults)): - print >> f, '\t%s = %s;' % ( - positional_args[min_number_of_args+i], - name_of_defaults[i]) - fmt = 'O'*min_number_of_args - if min_number_of_args < len(positional_args): - fmt += '|' + 'O'*(len(positional_args)-min_number_of_args) - lst = ['args', 'kwds', - '"%s:%s"' % (fmt, func.__name__), - 'kwlist', - ] 
- lst += ['&' + a.name for a in positional_args] - print >> f, '\tif (!PyArg_ParseTupleAndKeywords(%s))' % ', '.join(lst), - print >> f, tail - - call_fast_args = list(fast_args) - if call_fast_args: - call_fast_args = 'TRACE_CALL ' + ', '.join(call_fast_args) - else: - call_fast_args = 'TRACE_CALL_VOID' - print >> f, '\treturn %s(%s);' % (fast_name, call_fast_args) - print >> f, '}' - print >> f - - print >> f, fast_function_header - print >> f, '{' - - fast_locals = [arg for arg in localnames if arg not in fast_set] - if fast_locals: - print >> f, '\tPyObject *%s;' % (', *'.join(fast_locals),) - print >> f - - # generate an incref for each input argument - for v in positional_args: - print >> f, '\tPy_INCREF(%s);' % v.name - - # print the body - for line in body: - if line.endswith(':'): - if line.startswith('err'): - fmt = '\t%s' - else: - fmt = ' %s\n' - elif line: - fmt = '\t%s\n' - else: - fmt = '%s\n' - f.write(fmt % line) - print >> f, '}' - - # print the PyMethodDef - print >> f, ('static PyMethodDef ml_%s = {\n' - ' "%s", (PyCFunction)%s, METH_VARARGS|METH_KEYWORDS };' % ( - cname, func.__name__, f_name)) - print >> f - - if not self.translator.frozen: - # this is only to keep the RAM consumption under control - del self.translator.flowgraphs[func] - Variable.instances.clear() - - def cfunction_body(self, func): - graph = self.translator.getflowgraph(func) - remove_direct_loops(graph) - checkgraph(graph) - - blocknum = {} - allblocks = [] - - def expr(v): - if isinstance(v, Variable): - return v.name - elif isinstance(v, Constant): - return self.nameof(v.value, - debug=('Constant in the graph of',func)) - else: - raise TypeError, "expr(%r)" % (v,) - - def gen_link(link, linklocalvars=None): - "Generate the code to jump across the given Link." 
- has_ref = {} - linklocalvars = linklocalvars or {} - for v in to_release: - linklocalvars[v] = v.name - has_ref = linklocalvars.copy() - for a1, a2 in zip(link.args, link.target.inputargs): - if a1 in linklocalvars: - src = linklocalvars[a1] - else: - src = expr(a1) - line = 'MOVE(%s, %s)' % (src, a2.name) - if a1 in has_ref: - del has_ref[a1] - else: - line += '\tPy_INCREF(%s);' % a2.name - yield line - for v in has_ref: - yield 'Py_DECREF(%s);' % linklocalvars[v] - yield 'goto block%d;' % blocknum[link.target] - - # collect all blocks - def visit(block): - if isinstance(block, Block): - allblocks.append(block) - blocknum[block] = len(blocknum) - traverse(visit, graph) - - # generate the body of each block - for block in allblocks: - yield '' - yield 'block%d:' % blocknum[block] - to_release = list(block.inputargs) - for op in block.operations: - lst = [expr(v) for v in op.args] - lst.append(op.result.name) - lst.append('err%d_%d' % (blocknum[block], len(to_release))) - macro = 'OP_%s' % op.opname.upper() - meth = getattr(self, macro, None) - if meth: - yield meth(lst[:-2], lst[-2], lst[-1]) - else: - yield '%s(%s)' % (macro, ', '.join(lst)) - to_release.append(op.result) - - err_reachable = False - if len(block.exits) == 0: - if len(block.inputargs) == 2: # exc_cls, exc_value - # exceptional return block - exc_cls = expr(block.inputargs[0]) - exc_value = expr(block.inputargs[1]) - yield 'PyErr_Restore(%s, %s, NULL);' % (exc_cls, exc_value) - yield 'FUNCTION_RETURN(NULL)' - else: - # regular return block - retval = expr(block.inputargs[0]) - yield 'FUNCTION_RETURN(%s)' % retval - continue - elif block.exitswitch is None: - # single-exit block - assert len(block.exits) == 1 - for op in gen_link(block.exits[0]): - yield op - yield '' - elif block.exitswitch == Constant(last_exception): - # block catching the exceptions raised by its last operation - # we handle the non-exceptional case first - link = block.exits[0] - assert link.exitcase is None - for op in 
gen_link(link): - yield op - # we must catch the exception raised by the last operation, - # which goes to the last err%d_%d label written above. - yield '' - to_release.pop() # skip default error handling for this label - yield 'err%d_%d:' % (blocknum[block], len(to_release)) - yield '' - for link in block.exits[1:]: - assert issubclass(link.exitcase, Exception) - yield 'if (PyErr_ExceptionMatches(%s)) {' % ( - self.nameof(link.exitcase),) - yield '\tPyObject *exc_cls, *exc_value, *exc_tb;' - yield '\tPyErr_Fetch(&exc_cls, &exc_value, &exc_tb);' - yield '\tif (exc_value == NULL) {' - yield '\t\texc_value = Py_None;' - yield '\t\tPy_INCREF(Py_None);' - yield '\t}' - yield '\tPy_XDECREF(exc_tb);' - for op in gen_link(link, { - Constant(last_exception): 'exc_cls', - Constant(last_exc_value): 'exc_value'}): - yield '\t' + op - yield '}' - err_reachable = True - else: - # block ending in a switch on a value - for link in block.exits[:-1]: - yield 'if (EQ_%s(%s)) {' % (link.exitcase, - block.exitswitch.name) - for op in gen_link(link): - yield '\t' + op - yield '}' - link = block.exits[-1] - yield 'assert(EQ_%s(%s));' % (link.exitcase, - block.exitswitch.name) - for op in gen_link(block.exits[-1]): - yield op - yield '' - - while to_release: - v = to_release.pop() - if err_reachable: - yield 'ERR_DECREF(%s)' % v.name - yield 'err%d_%d:' % (blocknum[block], len(to_release)) - err_reachable = True - if err_reachable: - yield 'FUNCTION_RETURN(NULL)' - -# ____________________________________________________________ - - C_HEADER = '#include "genc.h"\n' - - C_SEP = "/************************************************************/" - - C_INIT_HEADER = C_SEP + ''' - -MODULE_INITFUNC(%(modname)s) -{ -\tSETUP_MODULE(%(modname)s) -''' - - C_INIT_FOOTER = ''' -\tPyModule_AddObject(m, "%(entrypointname)s", %(entrypoint)s); -}''' - - # the C preprocessor cannot handle operations taking a variable number - # of arguments, so here are Python methods that do it - - def OP_NEWLIST(self, 
args, r, err): - if len(args) == 0: - return 'OP_NEWLIST0(%s, %s)' % (r, err) - else: - args.insert(0, '%d' % len(args)) - return 'OP_NEWLIST((%s), %s, %s)' % (', '.join(args), r, err) - - def OP_NEWDICT(self, args, r, err): - if len(args) == 0: - return 'OP_NEWDICT0(%s, %s)' % (r, err) - else: - assert len(args) % 2 == 0 - args.insert(0, '%d' % (len(args)//2)) - return 'OP_NEWDICT((%s), %s, %s)' % (', '.join(args), r, err) - - def OP_NEWTUPLE(self, args, r, err): - args.insert(0, '%d' % len(args)) - return 'OP_NEWTUPLE((%s), %s, %s)' % (', '.join(args), r, err) - - def OP_SIMPLE_CALL(self, args, r, err): - args.append('NULL') - return 'OP_SIMPLE_CALL((%s), %s, %s)' % (', '.join(args), r, err) - - def OP_CALL_ARGS(self, args, r, err): - return 'OP_CALL_ARGS((%s), %s, %s)' % (', '.join(args), r, err) - -# ____________________________________________________________ - -def cdecl(type, name): - # Utility to generate a typed name declaration in C syntax. - # For local variables, struct fields, function declarations, etc. - # For complex C types, the 'type' can contain a '@' character that - # specifies where the 'name' should be inserted; for example, an - # array of 10 ints has a type of "int @[10]". 
- if '@' in type: - return type.replace('@', name) - else: - return ('%s %s' % (type, name)).rstrip() - -# a translation table suitable for str.translate() to remove -# non-C characters from an identifier -C_IDENTIFIER = ''.join([(('0' <= chr(i) <= '9' or - 'a' <= chr(i) <= 'z' or - 'A' <= chr(i) <= 'Z') and chr(i) or '_') - for i in range(256)]) diff --git a/pypy/translator/gencl.py b/pypy/translator/gencl.py deleted file mode 100644 index 0946f01e3a..0000000000 --- a/pypy/translator/gencl.py +++ /dev/null @@ -1,347 +0,0 @@ -from pypy.objspace.flow.model import * -from pypy.translator.annrpython import RPythonAnnotator - -from pypy.translator.simplify import simplify_graph -from pypy.translator.transform import transform_graph - - -DEBUG = False - - -# XXX For 2.2 the emitted code isn't quite right, because we cannot tell -# when we should write "0"/"1" or "nil"/"t". -if not isinstance(bool, type): - class bool(int): - pass - - -class Op: - def __init__(self, gen, op): - self.gen = gen - self.str = gen.str - self.op = op - self.opname = op.opname - self.args = op.args - self.result = op.result - def __call__(self): - if DEBUG: - self.op_default() - return - if self.opname in self.binary_ops: - self.op_binary(self.opname) - else: - default = self.op_default - meth = getattr(self, "op_" + self.opname, default) - meth() - def op_default(self): - print ";;", self.op - print ";; Op", self.opname, "is missing" - binary_ops = { - #"add": "+", - "sub": "-", - "inplace_add": "+", # weird, but it works - "inplace_lshift": "ash", - "mod": "mod", - "lt": "<", - "le": "<=", - "eq": "=", - "gt": ">", - "and_": "logand", - "getitem": "elt", - } - def op_binary(self, op): - s = self.str - result, (arg1, arg2) = self.result, self.args - cl_op = self.binary_ops[op] - print "(setq", s(result), "(", cl_op, s(arg1), s(arg2), "))" - def op_add(self): - s = self.str - result, (arg1, arg2) = self.result, self.args - print "(setq", s(result) - table = { - (int, int): "(+ %s %s)", - (int, 
long): "(+ %s %s)", - (long, int): "(+ %s %s)", - (long, long): "(+ %s %s)", - (str, str): "(concatenate 'string %s %s)", - (list, list): "(concatenate 'vector %s %s)", - } - self.gen.emit_typecase(table, arg1, arg2) - print ")" - def op_not_(self): - s = self.str - result, (arg1,) = self.result, self.args - print "(setq", s(result), "(not" - table = { - (bool,): "(not %s)", - (int,): "(zerop %s)", - (long,): "(zerop %s)", - (list,): "(zerop (length %s))", - } - self.gen.emit_typecase(table, arg1) - print "))" - def op_is_true(self): - s = self.str - result, (arg1,) = self.result, self.args - print "(setq", s(result) - table = { - (bool,): "%s", - (int,): "(not (zerop %s))", - (long,): "(not (zerop %s))", - (list,): "(not (zerop (length %s)))", - } - self.gen.emit_typecase(table, arg1) - print ")" - def op_newtuple(self): - s = self.str - print "(setq", s(self.result), "(list", - for arg in self.args: - print s(arg), - print "))" - def op_newlist(self): - s = self.str - print "(setq", s(self.result), "(vector", - for arg in self.args: - print s(arg), - print "))" - def op_alloc_and_set(self): - s = self.str - result, (size, init) = self.result, self.args - print "(setq", s(result), "(make-array", s(size), "))" - print "(fill", s(result), s(init), ")" - def op_setitem(self): - s = self.str - (seq, index, element) = self.args - print "(setf (elt", s(seq), s(index), ")", s(element), ")" - def op_iter(self): - s = self.str - result, (seq,) = self.result, self.args - print "(setq", s(result), "(make-iterator", s(seq), "))" - def op_next(self): - s = self.str - result, (iterator,) = self.result, self.args - print "(let ((result (funcall", s(iterator), ")))" - print " (setq", s(result), "(car result))" - print " (setq last-exc (cdr result)))" - builtin_map = { - pow: "expt", - range: "python-range", - } - def op_simple_call(self): - func = self.args[0] - if not isinstance(func, Constant): - self.op_default() - return - func = func.value - if func not in self.builtin_map: 
- self.op_default() - return - s = self.str - args = self.args[1:] - print "(setq", s(self.result), "(", self.builtin_map[func], - for arg in args: - print s(arg), - print "))" - def op_getslice(self): - s = self.str - result, (seq, start, end) = self.result, self.args - print "(setq", s(result), "(python-slice", s(seq), s(start), s(end), "))" - - -class GenCL: - def __init__(self, fun, input_arg_types=[]): - simplify_graph(fun) - self.fun = fun - self.blockref = {} - self.annotate(input_arg_types) - transform_graph(self.ann) - def annotate(self, input_arg_types): - ann = RPythonAnnotator() - ann.build_types(self.fun, input_arg_types) - self.setannotator(ann) - def setannotator(self, annotator): - self.ann = annotator - def get_type(self, var): - return self.ann.gettype(var) - def str(self, obj): - if isinstance(obj, Variable): - return obj.name - elif isinstance(obj, Constant): - return self.conv(obj.value) - else: - return "#<%r>" % (obj,) - def conv(self, val): - if isinstance(val, bool): # should precedes int - if val: - return "t" - else: - return "nil" - elif isinstance(val, (int, long)): - return str(val) - elif val is None: - return "nil" - elif isinstance(val, str): - val.replace("\\", "\\\\") - val.replace("\"", "\\\"") - val = '"' + val + '"' - return val - elif isinstance(val, type(Exception)) and issubclass(val, Exception): - return "'%s" % val.__name__ - elif val is last_exception: - return "last-exc" - elif val is last_exc_value: - return "'last-exc-value" - else: - return "#<%r>" % (val,) - def emitcode(self, public=True): - import sys - from cStringIO import StringIO - out = StringIO() - oldstdout = sys.stdout - sys.stdout = out - self.emit() - sys.stdout = oldstdout - return out.getvalue() - def emit(self): - self.emit_defun(self.fun) - def emit_defun(self, fun): - print ";;;; Main" - print "(defun", fun.name - arglist = fun.getargs() - print "(", - for arg in arglist: - print self.str(arg), - print ")" - print "(prog" - blocklist = [] - def 
collect_block(node): - if isinstance(node, Block): - blocklist.append(node) - traverse(collect_block, fun) - vardict = {} - for block in blocklist: - tag = len(self.blockref) - self.blockref[block] = tag - for var in block.getvariables(): - vardict[var] = self.get_type(var) - print "( last-exc", - for var in vardict: - if var in arglist: - print "(", self.str(var), self.str(var), ")", - else: - print self.str(var), - print ")" - print ";; DEBUG: type inference" - for var in vardict: - tp = vardict[var] - if tp: - print ";;", self.str(var), "is", tp.__name__ - print "(setq last-exc nil)" - for block in blocklist: - self.emit_block(block) - print ")" - print ")" - def emit_block(self, block): - self.cur_block = block - tag = self.blockref[block] - print "tag" + str(tag) - for op in block.operations: - emit_op = Op(self, op) - emit_op() - exits = block.exits - if len(exits) == 1: - self.emit_link(exits[0]) - elif len(exits) > 1: - # only works in the current special case - if (len(exits) == 2 and - exits[0].exitcase == False and - exits[1].exitcase == True): - print "(if", self.str(block.exitswitch) - print "(progn" - self.emit_link(exits[1]) - print ") ; else" - print "(progn" - self.emit_link(exits[0]) - print "))" - else: - # this is for the more general case. 
The previous special case - # shouldn't be needed but in Python 2.2 we can't tell apart - # 0 vs nil and 1 vs t :-( - for exit in exits[:-1]: - print "(if (equalp", self.str(block.exitswitch), - print self.conv(exit.exitcase), ')' - print "(progn" - self.emit_link(exit) - print ")" - print "(progn ; else should be", self.conv(exits[-1].exitcase) - self.emit_link(exits[-1]) - print ")" * len(exits) - elif len(block.inputargs) == 2: # exc_cls, exc_value - exc_cls = self.str(block.inputargs[0]) - exc_value = self.str(block.inputargs[1]) - print "(something-like-throw-exception %s %s)" % (exc_cls, exc_value) - else: - retval = self.str(block.inputargs[0]) - print "(return", retval, ")" - def emit_jump(self, block): - tag = self.blockref[block] - print "(go", "tag" + str(tag), ")" - def emit_link(self, link): - source = link.args - target = link.target.inputargs - print "(psetq", # parallel assignment - for s, t in zip(source, target): - if s != t and not isinstance(s, UndefinedConstant): - print self.str(t), self.str(s), - print ")" - self.emit_jump(link.target) - typemap = { - bool: "boolean", - int: "fixnum", - long: "bignum", - type(''): "string", # hack, 'str' is in the namespace! 
- list: "vector", - } - def emit_typecase(self, table, *args): - argreprs = tuple(map(self.str, args)) - argtypes = tuple(map(self.get_type, args)) - if argtypes in table: - trans = table[argtypes] - print trans % argreprs - else: - print "(cond" - for argtypes in table: - print "((and", - for tp, s in zip(argtypes, argreprs): - cl_tp = "'" + self.typemap[tp] - print "(typep", s, cl_tp, ")", - print ")" - trans = table[argtypes] - print trans % argreprs, - print ")" - print ")" - def globaldeclarations(self): - return prelude - - -prelude = """;;;; Prelude -(defun make-iterator (seq) - (let ((i 0)) - (lambda () - (if (< i (length seq)) - (let ((v (elt seq i))) (incf i) (cons v nil)) - (cons nil 'StopIteration))))) -(defun python-slice (seq start end) - (let ((l (length seq))) - (if (not start) (setf start 0)) - (if (not end) (setf end l)) - (if (minusp start) (incf start l)) - (if (minusp end) (incf end l)) - (subseq seq start end))) -;; temporary -(defun python-range (end) - (let ((a (make-array end))) - (loop for i below end - do (setf (elt a i) i) - finally (return a)))) -""" diff --git a/pypy/translator/geninterplevel.py b/pypy/translator/geninterplevel.py deleted file mode 100644 index 0d587c70d8..0000000000 --- a/pypy/translator/geninterplevel.py +++ /dev/null @@ -1,1203 +0,0 @@ -""" -Implementation of a translator from application Python to interpreter level RPython. - -The idea is that we can automatically transform app-space implementations -of methods into some equivalent representation at interpreter level. -Then, the RPython to C translation might hopefully spit out some -more efficient code than always interpreting these methods. - -Note that the appspace functions are treated as rpythonic, in a sense -that globals are constants, for instance. This definition is not -exact and might change. - -This module appears to be already quite usable. -But I need to ask how we want to integrate it. - -XXX open questions: -- do we wantamoduleperapp-spaceoperation? 
-- do we want to auto-generate stuff? -- do we want to create code that is more similar to the app code? -- do we want to create specialized code for constants? -- do we want to use small tail-functions instead of goto? -- do we want to inline small functions? -- do we want to translate non-rpythonic code as well? -""" - -from __future__ import generators -import autopath, os, sys, exceptions -from pypy.objspace.flow.model import Variable, Constant, SpaceOperation -from pypy.objspace.flow.model import FunctionGraph, Block, Link -from pypy.objspace.flow.model import last_exception, last_exc_value -from pypy.objspace.flow.model import traverse, uniqueitems, checkgraph -from pypy.translator.simplify import remove_direct_loops -from pypy.interpreter.pycode import CO_VARARGS -from pypy.annotation import model as annmodel -from types import FunctionType, CodeType -from pypy.interpreter.error import OperationError -from pypy.objspace.std.restricted_int import r_int, r_uint - -from pypy.translator.translator import Translator -from pypy.objspace.std import StdObjSpace -from pypy.objspace.flow import FlowObjSpace - -from pypy.interpreter.gateway import app2interp, interp2app - - -# ____________________________________________________________ - -def c_string(s): - return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\"'),) - -def uniquemodulename(name, SEEN={}): - # never reuse the same module name within a Python session! 
- i = 0 - while True: - i += 1 - result = '%s_%d' % (name, i) - if result not in SEEN: - SEEN[result] = True - return result - -def ordered_blocks(graph): - # collect all blocks - allblocks = [] - def visit(block): - if isinstance(block, Block): - # first we order by offset in the code string - if block.operations: - ofs = block.operations[0].offset - else: - ofs = sys.maxint - # then we order by input variable name or value - if block.inputargs: - txt = str(block.inputargs[0]) - else: - txt = "dummy" - allblocks.append((ofs, txt, block)) - traverse(visit, graph) - allblocks.sort() - #for ofs, txt, block in allblocks: - # print ofs, txt, block - return [block for ofs, txt, block in allblocks] - -class UniqueList(list): - def __init__(self, *args, **kwds): - list.__init__(self, *args, **kwds) - self.dic = {} - - def append(self, arg): - try: - self.dic[arg] - except KeyError: - self.dic[arg] = 1 - list.append(self, arg) - except TypeError: # not hashable - if arg not in self: - list.append(self, arg) - -class GenRpy: - def __init__(self, translator, modname=None): - self.translator = translator - self.modname = self.trans_funcname(modname or - uniquemodulename(translator.functions[0].__name__)) - self.rpynames = {Constant(None).key: 'space.w_None', - Constant(False).key: 'space.w_False', - Constant(True).key: 'space.w_True', - } - - self.seennames = {} - u = UniqueList - self.initcode = u() # list of lines for the module's initxxx() - self.latercode = u() # list of generators generating extra lines - # for later in initxxx() -- for recursive - # objects - self.globaldecl = [] - self.globalobjects = [] - self.pendingfunctions = [] - self.currentfunc = None - self.debugstack = () # linked list of nested nameof() - - # special constructors: - self.has_listarg = {} - for name in "newtuple newlist newdict newstring".split(): - self.has_listarg[name] = name - - # XXX I guess it is cleaner to use the flow space? 
- # we just want to look into the operations, - # but we don't break into the implementation. - # Or should it be the base space, ARMIN? - #self.space = StdObjSpace() # for introspection - self.space = FlowObjSpace() # for introspection - - self.use_fast_call = False - - def expr(self, v, localnames, wrapped = True): - if isinstance(v, Variable): - n = v.name - if n.startswith("v") and n[1:].isdigit(): - ret = localnames.get(v.name) - if not ret: - if wrapped: - localnames[v.name] = ret = "w_%d" % len(localnames) - else: - localnames[v.name] = ret = "v%d" % len(localnames) - return ret - elif isinstance(v, Constant): - return self.nameof(v.value, - debug=('Constant in the graph of', self.currentfunc)) - else: - raise TypeError, "expr(%r)" % (v,) - - def arglist(self, args, localnames): - res = [self.expr(arg, localnames) for arg in args] - return ", ".join(res) - - def oper(self, op, localnames): - if op.opname == "simple_call": - v = op.args[0] - exv = self.expr(v, localnames) - if exv.startswith("space.") and not exv.startswith("space.w_"): - # it is a space method - fmt = "%(res)s = %(func)s(%(args)s)" - else: - # default for a spacecall: - fmt = ("_tup = space.newtuple([%(args)s])\n" - "%(res)s = space.call(%(func)s, _tup)") - # see if we can optimize for a fast call. - # we justdo the very simple ones. 
- if self.use_fast_call and (isinstance(v, Constant) - and exv.startswith('gfunc_')): - func = v.value - if (not func.func_code.co_flags & CO_VARARGS) and ( - func.func_defaults is None): - fmt = "%(res)s = fastf_%(func)s(space, %(args)s)" - exv = exv[6:] - return fmt % {"res" : self.expr(op.result, localnames), - "func": exv, - "args": self.arglist(op.args[1:], localnames) } - if op.opname in self.has_listarg: - fmt = "%s = %s([%s])" - else: - fmt = "%s = %s(%s)" - # special case is_true - wrapped = op.opname != "is_true" - oper = "space.%s" % op.opname - return fmt % (self.expr(op.result, localnames, wrapped), oper, - self.arglist(op.args, localnames)) - - def large_assignment(self, left, right, margin=65): - expr = "(%s) = (%s)" % (", ".join(left), ", ".join(right)) - pieces = expr.split(",") - res = [pieces.pop(0)] - for piece in pieces: - if len(res[-1])+len(piece)+1 > margin: - res[-1] += "," - res.append(piece) - else: - res[-1] += (","+piece) - return res - - def large_initialize(self, vars, margin=65): - res = [] - nonestr = "None" - margin -= len(nonestr) - for var in vars: - ass = var+"=" - if not res or len(res[-1]) >= margin: - res.append(ass) - res[-1] += ass - res = [line + nonestr for line in res] - return res - - def gen_link(self, link, localvars, blocknum, block, linklocalvars=None): - "Generate the code to jump across the given Link." 
- linklocalvars = linklocalvars or {} - left, right = [], [] - for a1, a2 in zip(link.args, link.target.inputargs): - if a1 in linklocalvars: - src = linklocalvars[a1] - else: - src = self.expr(a1, localvars) - left.append(self.expr(a2, localvars)) - right.append(src) - txt = "%s = %s" % (", ".join(left), ", ".join(right)) - if len(txt) <= 65: # arbitrary - yield txt - else: - for line in self.large_assignment(left, right): - yield line - goto = blocknum[link.target] - yield 'goto = %d' % goto - if goto <= blocknum[block]: - yield 'continue' - - def nameof(self, obj, debug=None, namehint=None): - key = Constant(obj).key - try: - return self.rpynames[key] - except KeyError: - if debug: - stackentry = debug, obj - else: - stackentry = obj - self.debugstack = (self.debugstack, stackentry) - if (type(obj).__module__ != '__builtin__' and - not isinstance(obj, type)): # skip user-defined metaclasses - # assume it's a user defined thingy - name = self.nameof_instance(obj) - else: - for cls in type(obj).__mro__: - meth = getattr(self, - 'nameof_' + cls.__name__.replace(' ', ''), - None) - if meth: - break - else: - raise Exception, "nameof(%r)" % (obj,) - - code=meth.im_func.func_code - if namehint and 'namehint' in code.co_varnames[:code.co_argcount]: - name = meth(obj, namehint=namehint) - else: - name = meth(obj) - self.debugstack, x = self.debugstack - assert x is stackentry - self.rpynames[key] = name - return name - - def uniquename(self, basename): - basename = basename.translate(C_IDENTIFIER) - n = self.seennames.get(basename, 0) - self.seennames[basename] = n+1 - if n == 0: - self.globalobjects.append(basename) - self.globaldecl.append('# global object %s' % (basename,)) - return basename - else: - return self.uniquename('%s_%d' % (basename, n)) - - def nameof_object(self, value): - if type(value) is not object: - #raise Exception, "nameof(%r) in %r" % (value, self.currentfunc) - name = self.uniquename('g_unknown_%r' % value) - self.initcode.append('# cannot build 
%s as %r' % (name, object)) - return name - name = self.uniquename('g_object') - self.initcode.append('m.%s = object()'%name) - return name - - def nameof_module(self, value): - assert value is os or not hasattr(value, "__file__") or \ - not (value.__file__.endswith('.pyc') or - value.__file__.endswith('.py') or - value.__file__.endswith('.pyo')), \ - "%r is not a builtin module (probably :)"%value - name = self.uniquename('mod_%s'%value.__name__) - self.initcode.append('import %s as _tmp' % value.__name__) - self.initcode.append('m.%s = space.wrap(_tmp)' % (name)) - return name - - - def nameof_int(self, value): - if value >= 0: - name = 'gi_%d' % value - else: - name = 'gim_%d' % abs(value) - name = self.uniquename(name) - self.initcode.append('m.%s = space.newint(%d)' % (name, value)) - return name - - def nameof_long(self, value): - # allow short longs only, meaning they - # must fit into a machine word. - assert (sys.maxint*2+1)&value==value, "your literal long is too long" - # assume we want them in hex most of the time - if value < 256L: - s = "%dL" % value - else: - s = "0x%08xL" % value - if value >= 0: - name = 'glong_%s' % s - else: - name = 'glongm_%d' % abs(value) - name = self.uniquename(name) - self.initcode.append('m.%s = space.wrap(%s) # XXX implement long!' % (name, s)) - return name - - def nameof_float(self, value): - name = 'gfloat_%s' % value - name = (name.replace('-', 'minus') - .replace('.', 'dot')) - name = self.uniquename(name) - self.initcode.append('m.%s = space.newfloat(%r)' % (name, value)) - return name - - def nameof_str(self, value): - if [c for c in value if c<' ' or c>'~' or c=='"' or c=='\\']: - # non-printable string - namestr = repr(value)[1:-1] - else: - # printable string - namestr = value - if not namestr: - namestr = "_emptystr_" - name = self.uniquename('gs_' + namestr[:32]) - # self.initcode.append('m.%s = space.newstring(%r)' % (name, value)) - # ick! 
very unhandy - self.initcode.append('m.%s = space.wrap(%r)' % (name, value)) - return name - - def skipped_function(self, func): - # debugging only! Generates a placeholder for missing functions - # that raises an exception when called. - name = self.uniquename('gskippedfunc_' + func.__name__) - self.globaldecl.append('# global decl %s' % (name, )) - self.initcode.append('# build func %s' % name) - return name - - def trans_funcname(self, s): - return s.translate(C_IDENTIFIER) - - def nameof_function(self, func, namehint=''): - printable_name = '(%s:%d) %s' % ( - self.trans_funcname(func.func_globals.get('__name__', '?')), - func.func_code.co_firstlineno, - func.__name__) - if self.translator.frozen: - if func not in self.translator.flowgraphs: - print "NOT GENERATING", printable_name - return self.skipped_function(func) - else: - if (func.func_doc and - func.func_doc.lstrip().startswith('NOT_RPYTHON')): - print "skipped", printable_name - return self.skipped_function(func) - name = self.uniquename('gfunc_' + self.trans_funcname( - namehint + func.__name__)) - f_name = 'f_' + name[6:] - self.initcode.append('from pypy.interpreter.gateway import interp2app') - self.initcode.append('m.%s = space.wrap(interp2app(%s))' % (name, f_name)) - self.pendingfunctions.append(func) - return name - - def nameof_staticmethod(self, sm): - # XXX XXX XXXX - func = sm.__get__(42.5) - name = self.uniquename('gsm_' + func.__name__) - functionname = self.nameof(func) - self.initcode.append('m.%s = space.wrap(%s)' % (name, functionname)) - return name - - def nameof_instancemethod(self, meth): - if meth.im_self is None: - # no error checking here - return self.nameof(meth.im_func, namehint="%s_" % meth.im_class.__name__) - else: - ob = self.nameof(meth.im_self) - func = self.nameof(meth.im_func) - typ = self.nameof(meth.im_class) - name = self.uniquename('gmeth_'+meth.im_func.__name__) - self.initcode.append( - '%s = space.getattr(%s, %s)'%(name, ob, func)) - return name - - def 
should_translate_attr(self, pbc, attr): - ann = self.translator.annotator - if ann is None: - ignore = getattr(pbc.__class__, 'NOT_RPYTHON_ATTRIBUTES', []) - if attr in ignore: - return False - else: - return "probably" # True - classdef = ann.getuserclasses().get(pbc.__class__) - if classdef and classdef.about_attribute(attr) is not None: - return True - return False - - def later(self, gen): - self.latercode.append((gen, self.debugstack)) - - def nameof_instance(self, instance): - name = self.uniquename('ginst_' + instance.__class__.__name__) - cls = self.nameof(instance.__class__) - def initinstance(): - content = instance.__dict__.items() - content.sort() - for key, value in content: - if self.should_translate_attr(instance, key): - yield 'space.setattr(%s, %s, %s)' % ( - name, self.nameof(key), self.nameof(value)) - if isinstance(instance, Exception): - # specialcase for exception instances: wrap them directly - self.initcode.append('_ins = %s()\n' - 'm.%s = space.wrap(_ins)' % ( - instance.__class__.__name__, name)) - else: - # this seems to hardly work with the faked stuff - self.initcode.append('from types import InstanceType') - self.initcode.append('w_InstanceType = space.wrap(InstanceType)') - self.initcode.append('_tup = space.newtuple([%s])\n' - 'm.%s = space.call(w_InstanceType, _tup)' % ( - cls, name)) - self.later(initinstance()) - return name - - def nameof_builtin_function_or_method(self, func): - if func.__self__ is None: - # builtin function - if hasattr(self.space, func.__name__): - return "space.%s" % func.__name__ - # where does it come from? 
Python2.2 doesn't have func.__module__ - for modname, module in sys.modules.items(): - if hasattr(module, '__file__'): - if (module.__file__.endswith('.py') or - module.__file__.endswith('.pyc') or - module.__file__.endswith('.pyo')): - continue # skip non-builtin modules - if func is getattr(module, func.__name__, None): - break - else: - raise Exception, '%r not found in any built-in module' % (func,) - name = self.uniquename('gbltin_' + func.__name__) - if modname == '__builtin__': - self.initcode.append('m.%s = space.getattr(space.w_builtin, %s)'% ( - name, self.nameof(func.__name__))) - else: - self.initcode.append('m.%s = space.getattr(%s, %s)' % ( - name, self.nameof(module), self.nameof(func.__name__))) - else: - # builtin (bound) method - name = self.uniquename('gbltinmethod_' + func.__name__) - self.initcode.append('m.%s = space.getattr(%s, %s)' % ( - name, self.nameof(func.__self__), self.nameof(func.__name__))) - return name - - def nameof_classobj(self, cls): - if cls.__doc__ and cls.__doc__.lstrip().startswith('NOT_RPYTHON'): - raise Exception, "%r should never be reached" % (cls,) - - metaclass = "space.w_type" - name = self.uniquename('gcls_' + cls.__name__) - if issubclass(cls, Exception): - if cls.__module__ == 'exceptions': - if hasattr(self.space, "w_%s" % cls.__name__): - return 'space.w_%s'%cls.__name__ - else: - self.initcode.append('m.%s = space.wrap(%s)' % ( - name, cls.__name__)) - return name - #else: - # # exceptions must be old-style classes (grr!) - # metaclass = "&PyClass_Type" - # For the moment, use old-style classes exactly when the - # pypy source uses old-style classes, to avoid strange problems. - if not isinstance(cls, type): - assert type(cls) is type(Exception) - # self.initcode.append("import types\n" - # "m.classtype = space.wrap(types.ClassType)\n") - # metaclass = "m.classtype" - # XXX I cannot instantiate these. 
- # XXX using type instead, since we still inherit from exception - # XXX what is the future of classes in pypy? - - basenames = [self.nameof(base) for base in cls.__bases__] - def initclassobj(): - content = cls.__dict__.items() - content.sort() - for key, value in content: - if key.startswith('__'): - if key in ['__module__', '__doc__', '__dict__', - '__weakref__', '__repr__', '__metaclass__']: - continue - # XXX some __NAMES__ are important... nicer solution sought - #raise Exception, "unexpected name %r in class %s"%(key, cls) - - # redirect value through class interface, in order to - # get methods instead of functions. - value = getattr(cls, key) - - if isinstance(value, staticmethod) and value.__get__(1) not in self.translator.flowgraphs and self.translator.frozen: - print "skipped staticmethod:", value - continue - if isinstance(value, FunctionType) and value not in self.translator.flowgraphs and self.translator.frozen: - print "skippedfunction:", value - continue - - yield 'space.setattr(%s, %s, %s)' % ( - name, self.nameof(key), self.nameof(value)) - - baseargs = ", ".join(basenames) - self.initcode.append('_dic = space.newdict([])\n' - '_bases = space.newtuple([%(bases)s])\n' - '_args = space.newtuple([%(name)s, _bases, _dic])\n' - 'm.%(klass)s = space.call(%(meta)s, _args)' - % {"bases": baseargs, - "klass": name, - "name" : self.nameof(cls.__name__), - "meta" : metaclass} ) - - self.later(initclassobj()) - return name - - nameof_class = nameof_classobj # for Python 2.2 - - typename_mapping = { - object: 'space.w_object', - int: 'space.w_int', - long: 'space.w_long', - bool: 'space.w_bool', - list: 'space.w_list', - tuple: 'space.w_tuple', - dict: 'space.w_dict', - str: 'space.w_str', - float: 'space.w_float', - type(Exception()): 'space.wrap(types.InstanceType)', - type: 'space.w_type', - complex:'space.wrap(types.ComplexType)', - unicode:'space.w_unicode', - file: 'space.wrap(file)', - type(None): 'space.wrap(types.NoneType)', - CodeType: 
'space.wrap(types.CodeType)', - - ##r_int: 'space.w_int', - ##r_uint: 'space.w_int', - - # XXX we leak 5 references here, but that's the least of the - # problems with this section of code - # type 'builtin_function_or_method': - type(len): 'space.wrap(types.FunctionType)', - # type 'method_descriptor': - # XXX small problem here: - # XXX with space.eval, we get <W_TypeObject(method)> - # XXX but with wrap, we get <W_TypeObject(instancemethod)> - type(list.append): 'eval_helper(space, "list.append")', - # type 'wrapper_descriptor': - type(type(None).__repr__): 'eval_helper(space, ".type(None).__repr__")', - # type 'getset_descriptor': - # XXX here we get <W_TypeObject(FakeDescriptor)>, - # while eval gives us <W_TypeObject(GetSetProperty)> - type(type.__dict__['__dict__']): 'eval_helper(space,'\ - ' "type(type.__dict__[\'__dict__\'])")', - # type 'member_descriptor': - # XXX this does not work in eval! - # type(type.__dict__['__basicsize__']): "cannot eval type(type.__dict__['__basicsize__'])", - # XXX there seems to be no working support for member descriptors ??? 
- type(type.__dict__['__basicsize__']): "space.wrap(type(type.__dict__['__basicsize__']))", - } - - def nameof_type(self, cls): - if cls in self.typename_mapping: - return self.typename_mapping[cls] - assert cls.__module__ != '__builtin__', \ - "built-in class %r not found in typename_mapping" % (cls,) - return self.nameof_classobj(cls) - - def nameof_tuple(self, tup): - name = self.uniquename('g%dtuple' % len(tup)) - args = [self.nameof(x) for x in tup] - args = ', '.join(args) - self.initcode.append('m.%s = space.newtuple([%s])' % (name, args)) - return name - - def nameof_list(self, lis): - name = self.uniquename('g%dlist' % len(lis)) - def initlist(): - for i in range(len(lis)): - item = self.nameof(lis[i]) - yield 'space.setitem(%s, %s, %s);' % ( - name, self.nameof(i), self.nameof(item)) - self.initcode.append('m.%s = space.newlist(%s)' % (name, self.nameof(0))) - self.initcode.append('m.%s = space.mul(%s, %s)' % (name, name, self.nameof(len(lis)))) - self.later(initlist()) - return name - - def nameof_dict(self, dic): - assert dic is not __builtins__ - assert '__builtins__' not in dic, 'Seems to be the globals of %s' % ( - dic.get('__name__', '?'),) - name = self.uniquename('g%ddict' % len(dic)) - def initdict(): - for k in dic: - yield ('space.setitem(%s, %s, %s)'%( - name, self.nameof(k), self.nameof(dic[k]))) - self.initcode.append('m.%s = space.newdict([])' % (name,)) - self.later(initdict()) - return name - - # strange prebuilt instances below, don't look too closely - # XXX oh well. - def nameof_member_descriptor(self, md): - name = self.uniquename('gdescriptor_%s_%s' % ( - md.__objclass__.__name__, md.__name__)) - cls = self.nameof(md.__objclass__) - # do I need to take the dict and then getitem??? 
- self.initcode.append('m.%s = space.getattr(%s, %s)' % - (name, cls, self.nameof(md.__name__))) - return name - nameof_getset_descriptor = nameof_member_descriptor - nameof_method_descriptor = nameof_member_descriptor - nameof_wrapper_descriptor = nameof_member_descriptor - - def nameof_file(self, fil): - if fil is sys.stdin: - return 'PySys_GetObject("stdin")' - if fil is sys.stdout: - return 'PySys_GetObject("stdout")' - if fil is sys.stderr: - return 'PySys_GetObject("stderr")' - raise Exception, 'Cannot translate an already-open file: %r' % (fil,) - - def gen_source(self, fname, ftmpname=None): - self.fname = fname - self.ftmpname = ftmpname - - # generate unordered source file, first. - # I prefer this over ordering everything in memory. - fname = self.fname - if self.ftmpname: - fname = self.ftmpname - f = file(fname, "w") - # generate ordered source file - try: - self.f = f - self.gen_source_temp() - finally: - f.close() - - def copyfile(source, target): - file(target, "w").write(file(source).read()) - - def order_sections(fname): - sep = "\n##SECTION##\n" - txt = file(fname).read() - pieces = txt.split(sep) - prelude = pieces.pop(0) - postlude = pieces.pop() - dic = {} - while pieces: - func = pieces.pop() - head = pieces.pop() - key = makekey(head, len(pieces)) - dic[key] = head + sep + func - lis = dic.items() - lis.sort() - lis = [prelude] + [func for head, func in lis] + [postlude] - txt = sep.join(lis) - file(fname, "w").write(txt) - - def makekey(txt, uniqueno): - dic = {} - for line in txt.split("\n"): - ign, name, value = line.split(None, 2) - dic[name] = eval(value) - key = dic["filename"], dic["firstlineno"], uniqueno - return key - - order_sections(fname) - if self.ftmpname: - copyfile(self.ftmpname, self.fname) - - def gen_source_temp(self): - f = self.f - info = { - 'modname': self.modname, - 'entrypointname': self.trans_funcname( - self.translator.functions[0].__name__), - 'entrypoint': self.nameof(self.translator.functions[0]), - } - # 
header - print >> f, self.RPY_HEADER - - # function implementations - while self.pendingfunctions: - func = self.pendingfunctions.pop() - self.currentfunc = func - self.gen_rpyfunction(func) - # collect more of the latercode after each function - while self.latercode: - gen, self.debugstack = self.latercode.pop() - #self.initcode.extend(gen) -- eats TypeError! bad CPython! - for line in gen: - self.initcode.append(line) - self.debugstack = () - self.gen_global_declarations() - - # set the final splitter - print >> f, "##SECTION##" - # footer - print >> f, self.RPY_INIT_HEADER % info - for codelines in self.initcode: - for codeline in codelines.split("\n"): - print >> f, " %s" % codeline - print >> f, self.RPY_INIT_FOOTER % info - f.close() - - def gen_global_declarations(self): - g = self.globaldecl - if g: - f = self.f - print >> f, '# global declaration%s' % ('s'*(len(g)>1)) - for line in g: - print >> f, line - print >> f - del g[:] - g = self.globalobjects - for name in g: - pass # self.initcode.append('# REGISTER_GLOBAL(%s)' % (name,)) - del g[:] - - def gen_rpyfunction(self, func): - - f = self.f - print >> f, "##SECTION##" # simple to split, afterwards - print >> f, ("## filename %r\n" - "## function %r\n" - "## firstlineno %d") % ( - func.func_code.co_filename, - func.func_code.co_name, - func.func_code.co_firstlineno) - print >> f, "##SECTION##" - locals = {} - body = list(self.rpyfunction_body(func, locals)) - name_of_defaults = [self.nameof(x, debug=('Default argument of', func)) - for x in (func.func_defaults or ())] - self.gen_global_declarations() - - # print header - cname = self.nameof(func) - assert cname.startswith('gfunc_') - f_name = 'f_' + cname[6:] - - # collect all the local variables - graph = self.translator.getflowgraph(func) - localslst = [] - def visit(node): - if isinstance(node, Block): - localslst.extend(node.getvariables()) - traverse(visit, graph) - localnames = [self.expr(a, locals) for a in uniqueitems(localslst)] - - # collect 
all the arguments - if func.func_code.co_flags & CO_VARARGS: - vararg = graph.getargs()[-1] - positional_args = graph.getargs()[:-1] - else: - vararg = None - positional_args = graph.getargs() - min_number_of_args = len(positional_args) - len(name_of_defaults) - - fast_args = [self.expr(a, locals) for a in positional_args] - if vararg is not None: - fast_args.append(self.expr(vararg, locals)) - fast_name = 'fast' + f_name - - fast_set = dict(zip(fast_args, fast_args)) - - # create function declaration - name = func.__name__ - argstr = ", ".join(fast_args) - fast_function_header = ('def %s(space, %s):' - % (fast_name, argstr)) - - print >> f, 'def %s(space, *args_w):' % (f_name,) - kwlist = ['"%s"' % name for name in - func.func_code.co_varnames[:func.func_code.co_argcount]] - print >> f, ' kwlist = [%s]' % (', '.join(kwlist),) - - # argument unpacking - if vararg is not None: - varname = self.expr(vararg, locals) - lenargs = len(positional_args) - print >> f, ' %s = space.newtuple(list(args_w[%d:]))' % ( - varname, lenargs) - print >> f, ' _args_w = args_w[:%d]' % (lenargs,) - else: - print >> f, ' _args_w = args_w' - varname = None - - def tupstr(seq): - if len(seq) == 1: - fmt = '%s,' - else: - fmt = '%s' - return fmt % ', '.join(seq) - - print >> f, ' defaults_w = (%s)' % tupstr(name_of_defaults) - - theargs = [arg for arg in fast_args if arg != varname] - txt = ('from pypy.translator.genrpy import PyArg_ParseMini\n' - 'm.PyArg_ParseMini = PyArg_ParseMini\n' - 'from pypy.interpreter.error import OperationError\n' - 'm.OperationError = OperationError') - self.initcode.append(txt) - print >> f, ' funcname = "%s"' % func.__name__ - if theargs: - txt = ' %s = PyArg_ParseMini(space, funcname, %d, %d, _args_w, defaults_w)' - print >>f, txt % (tupstr(theargs), - min_number_of_args, len(positional_args)) - else: - txt = ' PyArg_ParseMini(space, funcname, %d, %d, _args_w, defaults_w)' - print >>f, txt % (min_number_of_args, len(positional_args)) - print >> f, ' return 
%s(space, %s)' % (fast_name, ', '.join(fast_args)) - print >> f - - print >> f, fast_function_header - - fast_locals = [arg for arg in localnames if arg not in fast_set] - if fast_locals: - print >> f - for line in self.large_initialize(fast_locals): - print >> f, " %s" % line - print >> f - # generate an incref for each input argument - # skipped - - # print the body - for line in body: - print >> f, line - print >> f - - # print the PyMethodDef - # skipped - - if not self.translator.frozen: - # this is only to keep the RAM consumption under control - del self.translator.flowgraphs[func] - Variable.instances.clear() - - def rpyfunction_body(self, func, localvars): - try: - graph = self.translator.getflowgraph(func) - except Exception, e: - print 20*"*", e - print func - raise - # not needed, we use tuple assignment! - # remove_direct_loops(graph) - checkgraph(graph) - - allblocks = [] - - f = self.f - t = self.translator - #t.simplify(func) - graph = t.getflowgraph(func) - - start = graph.startblock - allblocks = ordered_blocks(graph) - nblocks = len(allblocks) - - blocknum = {} - for block in allblocks: - blocknum[block] = len(blocknum)+1 - - yield " goto = %d # startblock" % blocknum[start] - yield " while True:" - - def render_block(block): - catch_exception = block.exitswitch == Constant(last_exception) - regular_op = len(block.operations) - catch_exception - # render all but maybe the last op - for op in block.operations[:regular_op]: - for line in self.oper(op, localvars).split("\n"): - yield "%s" % line - # render the last op if it is exception handled - for op in block.operations[regular_op:]: - yield "try:" - for line in self.oper(op, localvars).split("\n"): - yield " %s" % line - - if len(block.exits) == 0: - if len(block.inputargs) == 2: # exc_cls, exc_value - # exceptional return block - exc_cls = self.expr(block.inputargs[0], localvars) - exc_val = self.expr(block.inputargs[1], localvars) - yield "raise OperationError(%s, %s)" % (exc_cls, exc_val) - 
else: - # regular return block - retval = self.expr(block.inputargs[0], localvars) - yield "return %s" % retval - return - elif block.exitswitch is None: - # single-exit block - assert len(block.exits) == 1 - for op in self.gen_link(block.exits[0], localvars, blocknum, block): - yield "%s" % op - elif catch_exception: - # block catching the exceptions raised by its last operation - # we handle the non-exceptional case first - link = block.exits[0] - assert link.exitcase is None - for op in self.gen_link(link, localvars, blocknum, block): - yield " %s" % op - # we must catch the exception raised by the last operation, - # which goes to the last err%d_%d label written above. - # Since we only have OperationError, we need to select: - yield "except OperationError, e:" - q = "if" - for link in block.exits[1:]: - assert issubclass(link.exitcase, Exception) - # Exeption classes come unwrapped in link.exitcase - yield " %s space.is_true(space.issubtype(e.w_type, %s)):" % (q, - self.nameof(link.exitcase)) - q = "elif" - for op in self.gen_link(link, localvars, blocknum, block, { - Constant(last_exception): 'e.w_type', - Constant(last_exc_value): 'e.w_value'}): - yield " %s" % op - yield " else:raise # unhandled case, should not happen" - else: - # block ending in a switch on a value - exits = list(block.exits) - if len(exits) == 2 and ( - exits[0].exitcase is False and exits[1].exitcase is True): - # order these guys like Python does - exits.reverse() - q = "if" - for link in exits[:-1]: - yield "%s %s == %s:" % (q, self.expr(block.exitswitch, - localvars), - link.exitcase) - for op in self.gen_link(link, localvars, blocknum, block): - yield " %s" % op - q = "elif" - link = exits[-1] - yield "else:" - yield " assert %s == %s" % (self.expr(block.exitswitch, - localvars), - link.exitcase) - for op in self.gen_link(exits[-1], localvars, blocknum, block): - yield " %s" % op - - for block in allblocks: - blockno = blocknum[block] - yield "" - yield " if goto == %d:" % blockno - 
for line in render_block(block): - yield " %s" % line - -# ____________________________________________________________ - - RPY_HEADER = '#!/bin/env python\n# -*- coding: LATIN-1 -*-' - - RPY_SEP = "#*************************************************************" - - RPY_INIT_HEADER = RPY_SEP + ''' - -def init%(modname)s(space): - """NOT_RPYTHON""" - class m: pass # fake module - m.__dict__ = globals() -''' - - RPY_INIT_FOOTER = ''' -# entry point: %(entrypointname)s, %(entrypoint)s) -if __name__ == "__main__": - from pypy.objspace.std import StdObjSpace - space = StdObjSpace() - init%(modname)s(space) - print space.unwrap(space.call( - gfunc_%(entrypointname)s, space.newtuple([]))) -''' - -# a translation table suitable for str.translate() to remove -# non-C characters from an identifier -C_IDENTIFIER = ''.join([(('0' <= chr(i) <= '9' or - 'a' <= chr(i) <= 'z' or - 'A' <= chr(i) <= 'Z') and chr(i) or '_') - for i in range(256)]) - -# temporary arg parsing -# what about keywords? Gateway doesn't support it. 
-def PyArg_ParseMini(space, name, minargs, maxargs, args_w, defaults_w): - err = None - if len(args_w) < minargs: - txt = "%s() takes at least %d argument%s (%d given)" - plural = ['s', ''][minargs == 1] - err = (name, minargs, plural, len(args_w)) - if len(args_w) > maxargs: - plural = ['s', ''][maxargs == 1] - if minargs == maxargs: - if minargs == 0: - txt = '%s() takes no arguments (%d given)' - err = (name, len(args_w)) - elif minargs == 1: - txt = '%s() takes exactly %d argument%s (%d given)' - err = (name, maxargs, plural, len(args_w)) - else: - txt = '%s() takes at most %d argument%s (%d given)' - err = (name, maxargs, plural, len(args_w)) - if err: - w_txt = space.wrap(txt) - w_tup = space.wrap(err) - w_txt = space.mod(w_txt, w_tup) - raise OperationError(space.w_TypeError, w_txt) - - # finally, we create the result ;-) - res_w = args_w + defaults_w[len(args_w) - minargs:] - assert len(res_w) == maxargs - return res_w -# _____________________________________________________________________ - -## this should go into some test file - -def somefunc(arg): - pass - -def f(a,b): -## print "start" - a = [] - a.append(3) - for i in range(3): - pass#print i - if a > b: - try: - if b == 123: - raise ValueError - elif b == 321: - raise IndexError - return 123 - except ValueError: - raise TypeError - else: - dummy = somefunc(23) - return 42 - -class TestClass:pass - -def ff(a, b, c=3,*rest): - try: - try: - if rest: - raise SystemError, 42 - return a+b - finally: - a = 7 - if rest: - return len(rest),c - except TypeError: - print "eek" - -glob = 100 -def fff(): - global glob - return 42+glob - -def app_mod__String_ANY(format, values): - import _formatting - if isinstance(values, tuple): - return _formatting.format(format, values, None) - else: - if hasattr(values, 'keys'): - return _formatting.format(format, (values,), values) - else: - return _formatting.format(format, (values,), None) - -def app_str_decode__String_ANY_ANY(str, encoding=None, errors=None): - if 
encoding is None and errors is None: - return unicode(str) - elif errors is None: - return unicode(str, encoding) - else: - return unicode(str, encoding, errors) - - -def test_md5(): - #import md5 - # how do I avoid the builtin module? - from pypy.appspace import md5 - digest = md5.new("hello").hexdigest() - return digest - -def test_mod(): - return app_mod__String_ANY("-%s-", ["hallo"]) - -def test_join(): - return " ".join(["hi", "there"]) - -# cannot nest local classes, yet -# this appears to be a problem in flow space. -class AnIterClass(object): - def __init__(self): - self.lis = [c for c in "test"] - def next(self): - if self.lis: - return self.lis.pop() - raise StopIteration - def __iter__(self): - return self - -def test_iter(): - res = [] - for i in "hallo": - res.append(i) - for i in AnIterClass(): - res.append(i) - return res - -def test_strutil(): - from pypy.objspace.std import strutil - return (strutil.string_to_int("42"), - strutil.string_to_long("12345678901234567890")) - -def test_struct(): - from pypy.appspace import struct - import struct as stru - res1 = stru.pack('f',1.23), struct.pack('f',1.23) - res2 = struct.unpack('f', struct.pack('f',1.23)) - return res1, res2 - -def all_entries(): - res = [func() for func in entry_points[:-1]] - return res - -entry_points = (lambda: f(2, 3), - lambda: ff(2, 3, 5), - fff, - lambda: app_str_decode__String_ANY_ANY("hugo"), - test_mod, - test_md5, - test_join, - test_iter, - test_strutil, - test_struct, - all_entries) -entry_point = entry_points[-2] - -if __name__ == "__main__": - import os, sys - from pypy.interpreter import autopath - srcdir = os.path.dirname(autopath.pypydir) - appdir = os.path.join(autopath.pypydir, 'appspace') - - if appdir not in sys.path: - sys.path.insert(0, appdir) - - t = Translator(entry_point, verbose=False, simplifying=True) - gen = GenRpy(t) - gen.use_fast_call = True - import pypy.appspace.generated as tmp - pth = os.path.dirname(tmp.__file__) - ftmpname = "d:/tmp/look.py" - 
fname = os.path.join(pth, gen.modname+".py") - gen.gen_source(fname, ftmpname) - - #t.simplify() - #t.view() - # debugging - graph = t.getflowgraph() - ab = ordered_blocks(graph) # use ctrl-b in PyWin with ab - -def crazy_test(): - """ this thingy is generating the whole interpreter in itself""" - # but doesn't work, my goto's give a problem for flow space - dic = {"__builtins__": __builtins__, "__name__": "__main__"} - execfile("d:/tmp/look.py", dic) - - def test(): - f_ff(space, 2, 3) - test = type(test)(test.func_code, dic) - - t = Translator(test, verbose=False, simplifying=True) - gen = GenRpy(t) - gen.gen_source("d:/tmp/look2.py") diff --git a/pypy/translator/genpyrex.py b/pypy/translator/genpyrex.py deleted file mode 100644 index 8c936d18cf..0000000000 --- a/pypy/translator/genpyrex.py +++ /dev/null @@ -1,465 +0,0 @@ -""" -generate Pyrex files from the flowmodel. - -""" -from pypy.interpreter.baseobjspace import ObjSpace -from pypy.interpreter.argument import Arguments -from pypy.objspace.flow.model import Variable, Constant, UndefinedConstant -from pypy.objspace.flow.model import mkentrymap, last_exception -from pypy.translator.annrpython import RPythonAnnotator -from pypy.annotation.model import SomePBC -from pypy.annotation.classdef import isclassdef -import inspect - -class Op: - def __init__(self, operation, gen, block): - self._str = gen._str - self.gen = gen - self.argnames = [self._str(arg, block) for arg in operation.args] - self.resultname = self._str(operation.result, block) - self.op = operation - #op.opname - - def __call__(self): - operator = self.gen.ops.get(self.op.opname, self.op.opname) - args = self.argnames - if not (operator[0] >= "a" and operator[0] <= "z"): - if len(args) == 1: - return "%s = %s %s" % (self.resultname, operator) + args - elif len(args) == 2: - #Inplace operators - inp=['+=','-=','*=','/=','%=','&=','|=','^=','//=', - '<<=','>>=','**='] - if operator in inp: - return "%s = %s; %s %s %s" % (self.resultname, args[0], - 
self.resultname, operator, args[1]) - else: - return "%s = %s %s %s" % (self.resultname, args[0], operator, args[1]) - elif len(args) == 3 and operator == "**": #special case, have to handle it manually - return "%s = pow(%s, %s, %s)" % (self.resultname,) + args - else: - raise NotImplementedError, "I don't know to handle the operator %s (arity %s)" \ - % (operator, len(args)) - else: - method = getattr(self, "op_%s" % operator, self.generic_op) - return method() - - def ispythonident(self, s): - if s[0] not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_": - return False - for c in s[1:]: - if (c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_" - "0123456789"): - return False - return True - - - def generic_op(self): - """Generic handler for all operators, which I don't handle explicitly""" - - return "%s = %s(%s)" % (self.resultname, self.op.opname, ", ".join(self.argnames)) - - def op_next(self): - args = self.argnames - return "%s = %s.next()" % (self.resultname, args[0]) - - def op_getitem(self): - direct = "%s = %s[%s]" % ((self.resultname,) + tuple(self.argnames)) - w_sequence, w_index = self.op.args - tp = self.gen.get_type(w_index) - if tp is int: - return direct - else: - # the index could be a slice - indexname = self.argnames[1] - lines = [] - if tp is slice: # XXX do this better - lines.append('if 1:') - else: - lines.append('from types import SliceType') - lines.append('if isinstance(%s, SliceType):' % indexname) - lines.append(' assert %s.step is None' % indexname) - lines.append(' %s = %s[%s.start:%s.stop]' % (self.resultname, - self.argnames[0], - indexname, - indexname)) - lines.append('else:') - lines.append(' ' + direct) - return "\n".join(lines) - - def op_newtuple(self): - if self.argnames: - return "%s = (%s,)" % (self.resultname, ", ".join(self.argnames)) - else: - return "%s = ()" % self.resultname - - def op_newlist(self): - if self.argnames: - return "%s = [%s,]" % (self.resultname, ", ".join(self.argnames)) - 
else: - return "%s = []" % self.resultname - - def op_newdict(self): - pairs = [] - for i in range(0, len(self.argnames), 2): - pairs.append("%s: %s, " % (self.argnames[i], self.argnames[i+1])) - return "%s = {%s}" % (self.resultname, "".join(pairs)) - - def op_newslice(self): - a = self.argnames - return "%s = slice(%s, %s, %s)" % (self.resultname, a[0], a[1], a[2]) - - def op_call_args(self): - a = self.argnames - shape = self.op.args[1].value - args = Arguments.fromshape(None, shape, a[2:]) - lst = args.arguments_w[:] - for key, value in args.kwds_w: - lst.append("%s=%s" % (key, value)) - if args.w_stararg is not None: - lst.append("*%s" % args.w_stararg) - return "%s = %s(%s)" % (self.resultname, a[0], ", ".join(lst)) - - def op_simple_call(self): - a = self.argnames - return "%s = %s(%s)" % (self.resultname, a[0], ", ".join(a[1:])) - - def op_setitem(self): - a = self.argnames - return "%s[%s] = %s" % (a[0], a[1], a[2]) - - def op_getattr(self): - args = self.argnames - attr = self.op.args[1] - if isinstance(attr, Constant) and self.ispythonident(attr.value): - return "%s = %s.%s" % (self.resultname, args[0], attr.value) - else: - return "%s = getattr(%s)" % (self.resultname, ", ".join(args)) - - def op_setattr(self): - args = self.argnames - attr = self.op.args[1] - if isinstance(attr, Constant) and self.ispythonident(attr.value): - return "%s.%s = %s" % (args[0], attr.value, args[2]) - else: - return "setattr(%s, %s, %s)" % args - - def op_not(self): - return "%s = not %s" % (self.resultname, self.argnames[0]) - - def op_is_true(self): - return "%s = not not %s" % (self.resultname, self.argnames[0]) - -class GenPyrex: - def __init__(self, functiongraph): - self.functiongraph = functiongraph - ops = {} - oparity = {} - for (opname, opsymbol, arity, _) in ObjSpace.MethodTable: - ops[opname] = opsymbol - oparity[opname] = arity - self.ops = ops - self.oparity = oparity - self.annotator = None - self.namecache = {} - - def annotate(self, input_arg_types): - a = 
RPythonAnnotator() - a.build_types(self.functiongraph, input_arg_types) - self.setannotator(a) - - def setannotator(self, annotator): - self.annotator = annotator - - def emitcode(self, public=True): - self.blockids = {} - #self.variablelocations = {} - self.lines = [] - self.indent = 0 - self.gen_graph(public) - return "\n".join(self.lines) - - def putline(self, line): - for l in line.split('\n'): - self.lines.append(" " * self.indent + l) - - def gen_graph(self, public=True): - fun = self.functiongraph - self.entrymap = mkentrymap(fun) - currentlines = self.lines - self.lines = [] - self.indent += 1 - self.gen_block(fun.startblock) - self.indent -= 1 - # emit the header after the body - functionbodylines = self.lines - self.lines = currentlines - inputargnames = [ " ".join(self._paramvardecl(var)) for var in fun.getargs() ] - params = ", ".join(inputargnames) - returntype = self.get_type(fun.getreturnvar()) - returntypename = self._gettypename(returntype) - try: - function_object = self.by_the_way_the_function_was # XXX! - except AttributeError: - def function_object(): pass # XXX!!! 
- if public: - # make the function visible from the outside - # under its original name - args = ', '.join([var.name for var in fun.getargs()]) - self.putline("def %s(%s):" % (fun.name, args)) - self.indent += 1 - self.putline("return %s(%s)" % ( - self.getfunctionname(function_object), args)) - self.indent -= 1 - # go ahead with the mandled header and body of the function - self.putline("cdef %s %s(%s):" % ( - returntypename, - self.getfunctionname(function_object), - params)) - self.indent += 1 - #self.putline("# %r" % self.annotations) - decllines = [] - missing_decl = [] - funargs = fun.getargs() - for block in self.blockids: - for var in block.getvariables(): - if var not in funargs: - decl = self._vardecl(var) - if decl: - decllines.append(decl) - else: - missing_decl.append(self.get_varname(var)) - if missing_decl: - missing_decl.sort() - decllines.append('# untyped variables: ' + ' '.join(missing_decl)) - decllines.sort() - for decl in decllines: - self.putline(decl) - self.indent -= 1 - self.lines.extend(functionbodylines) - - def get_type(self, var): - if isinstance(var, Constant): - return type(var.value) - elif self.annotator: - return self.annotator.gettype(var) - else: - return None - - def get_varname(self, var): - vartype = self.get_type(var) - if vartype in (int, bool): - prefix = "i_" - elif self.annotator and vartype in self.annotator.getuserclasses(): - prefix = "p_" - else: - prefix = "" - return prefix + var.name - - def _paramvardecl(self, var): - vartype = self.get_type(var) - ctype=self._gettypename(vartype) - return (ctype, self.get_varname(var)) - - def _gettypename(self, vartype): - if vartype in (int, bool): - ctype = "int" - elif (self.annotator and vartype in self.annotator.getuserclasses() - and vartype.__module__ != '__builtin__'): - ctype = self.getclassname(vartype) - else: - ctype = "object" - return ctype - - def _vardecl(self, var): - vartype, varname = self._paramvardecl(var) - if vartype != "object": - return "cdef %s %s" % 
(vartype, varname) - else: - return "" - - def getclassname(self,cls): - assert inspect.isclass(cls) - name = cls.__name__ - if issubclass(cls,Exception): - return name - return '%s__%x' % (name, id(cls))#self._hackname(cls) - - def getfunctionname(self,func): - # NB. the purpose of the cache is not performance, but to ensure that - # two methods that compare equal get the same name. - if inspect.ismethod(func) and func.im_self is None: - func = func.im_func # consider unbound methods as plain functions - try: - return self.namecache[func] - except KeyError: - assert inspect.isfunction(func) or inspect.ismethod(func) - name = '%s__%x' % (func.__name__, id(func))#self._hackname(func) - self.namecache[func] = name - return name - - def getvarname(self,var): - assert inspect.isclass(var) - return self._hackname(var) - - def _str(self, obj, block): - if isinstance(obj, Variable): - #self.variablelocations[obj] = block - return self.get_varname(obj) - elif isinstance(obj, Constant): - import types - if isinstance(obj.value,(types.ClassType,type)): - fff=self.getclassname(obj.value) - elif isinstance(obj.value,(types.FunctionType, - types.MethodType, - type)): - fff=self.getfunctionname(obj.value) - elif isinstance(obj.value, types.BuiltinFunctionType): - fff=str(obj.value.__name__) - else: - #fff=self._hackname(obj.value) - fff=repr(obj.value) - if isinstance(obj.value,( int,long)): - fff = repr(int(obj.value)) - return fff - else: - raise TypeError("Unknown class: %s" % obj.__class__) - - def gen_block(self, block): - if self.blockids.has_key(block): - self.putline('cinline "goto Label%s;"' % self.blockids[block]) - return - - blockids = self.blockids - blockids.setdefault(block, len(blockids)) - - #the label is only written if there are multiple refs to the block - if len(self.entrymap[block]) > 1: - self.putline('cinline "Label%s:"' % blockids[block]) - - if block.exitswitch == Constant(last_exception): - catch_exc = len(block.operations)-1 - else: - catch_exc = None 
- - for i, op in zip(range(len(block.operations)), block.operations): - if i == catch_exc: - self.putline("try:") - self.indent += 1 - opg = Op(op, self, block) - self.putline(opg()) - if i == catch_exc: - # generate all exception handlers - self.indent -= 1 - exits = block.exits - for exit in exits[1:]: - self.putline("except %s, last_exc_value:" % - exit.exitcase.__name__) - self.indent += 1 - self.putline("last_exception = last_exc_value.__class__") - self.gen_link(block, exit) - self.indent -= 1 - self.putline("else:") # no-exception case - self.indent += 1 - assert exits[0].exitcase is None - self.gen_link(block, exits[0]) - self.indent -= 1 - break - - else: - exits = block.exits - if len(exits) == 1: - self.gen_link(block, exits[0]) - elif len(exits) > 1: - varname = self._str(block.exitswitch, block) - for i in range(len(exits)): - exit = exits[-i-1] # reverse order - cond = self._str(Constant(exit.exitcase), block) - if i == 0: - self.putline("if %s == %s:" % (varname, cond)) - elif i < len(exits) - 1: - self.putline("elif %s == %s:" % (varname, cond)) - else: - self.putline("else: # %s == %s" % (varname, cond)) - self.indent += 1 - self.gen_link(block, exit) - self.indent -= 1 - elif len(block.inputargs) == 2: # exc_cls, exc_value - exc_cls = self._str(block.inputargs[0], block) - exc_value = self._str(block.inputargs[1], block) - self.putline("raise %s, %s" % (exc_cls, exc_value)) - else: - self.putline("return %s" % self._str(block.inputargs[0], block)) - - def gen_link(self, prevblock, link): - _str = self._str - block = link.target - sourceargs = link.args - targetargs = block.inputargs - assert len(sourceargs) == len(targetargs) - # get rid of identity-assignments and assignments of UndefinedConstant - sargs, targs = [], [] - for s,t in zip(sourceargs, targetargs): - if s != t and not isinstance(s, UndefinedConstant): - sargs.append(s) - targs.append(t) - if sargs: - sargs = [_str(arg, prevblock) for arg in sargs] - targs = [_str(arg, block) for arg 
in targs] - self.putline("%s = %s" % (", ".join(targs), ", ".join(sargs))) - - self.gen_block(block) - - def globaldeclarations(self,): - """Generate the global class declaration for a group of functions.""" - if self.annotator: - self.lines = [] - self.indent = 0 - delay_methods={} - for cls in self.annotator.getuserclassdefinitions(): - if cls.basedef: - bdef="(%s)" % (self.getclassname(cls.basedef.cls)) - else: - bdef="" - self.putline("cdef class %s%s:" % (self.getclassname(cls.cls),bdef)) - self.indent += 1 - empty = True - for attr, attrdef in cls.attrs.items(): - s_value = attrdef.s_value - if isinstance(s_value, SomePBC): - for py_fun,fun_class in s_value.prebuiltinstances.items(): - assert isclassdef(fun_class), ("don't support " - "prebuilt constants like %r" % py_fun) - delay_methods.setdefault(fun_class,[]).append(py_fun) - else: - vartype=self._gettypename(s_value.knowntype) - self.putline("cdef public %s %s" % (vartype, attr)) - empty = False - list_methods=delay_methods.get(cls,[]) - for py_fun in list_methods: - # XXX! 
- try: - fun = self.annotator.translator.flowgraphs[py_fun] - except KeyError: - continue # method present in class but never called - hackedargs = ', '.join([var.name for var in fun.getargs()]) - self.putline("def %s(%s):" % (py_fun.__name__, hackedargs)) - self.indent += 1 - # XXX special case hack: cannot use 'return' in __init__ - if py_fun.__name__ == "__init__": - statement = "" - else: - statement = "return " - self.putline("%s%s(%s)" % (statement, - self.getfunctionname(py_fun), - hackedargs)) - self.indent -= 1 - empty = False - if empty: - self.putline("pass") - self.indent -= 1 - self.putline("") - return '\n'.join(self.lines) - else: - return '' - diff --git a/pypy/translator/java/Builtin.java b/pypy/translator/java/Builtin.java deleted file mode 100644 index df9776541e..0000000000 --- a/pypy/translator/java/Builtin.java +++ /dev/null @@ -1,13 +0,0 @@ - -class builtin_len extends PyObject { - PyObject op_simple_call(PyObject x) - { - return new PyInt(x.len()); - } -} - -class Builtin { - - static PyObject len = new builtin_len(); - -} diff --git a/pypy/translator/java/PyBool.java b/pypy/translator/java/PyBool.java deleted file mode 100644 index f94acd0056..0000000000 --- a/pypy/translator/java/PyBool.java +++ /dev/null @@ -1,9 +0,0 @@ - - -class PyBool extends PyInt { - - PyBool(boolean x) { - super(x ? 
1 : 0); - } - -}; diff --git a/pypy/translator/java/PyInt.java b/pypy/translator/java/PyInt.java deleted file mode 100644 index 2320320013..0000000000 --- a/pypy/translator/java/PyInt.java +++ /dev/null @@ -1,47 +0,0 @@ - - -class PyInt extends PyObject { - - int intval; - - PyInt(int x) { - intval = x; - } - - boolean is_true() { - return intval != 0; - } - - boolean eq(PyObject other) { - if (other instanceof PyInt) - return intval == ((PyInt) other).intval; - else - return false; - } - - boolean lt(PyObject other) { - if (other instanceof PyInt) - return intval < ((PyInt) other).intval; - else - throw new TypeError(); - } - - PyObject op_neg() { - return new PyInt(-intval); - } - - PyObject op_add(PyObject other) { - if (other instanceof PyInt) - return new PyInt(intval + ((PyInt) other).intval); - else - throw new TypeError(); - } - - PyObject op_sub(PyObject other) { - if (other instanceof PyInt) - return new PyInt(intval - ((PyInt) other).intval); - else - throw new TypeError(); - } - -}; diff --git a/pypy/translator/java/PyList.java b/pypy/translator/java/PyList.java deleted file mode 100644 index daee202f88..0000000000 --- a/pypy/translator/java/PyList.java +++ /dev/null @@ -1,74 +0,0 @@ - - -class PyList extends PySequence { - - PyList(PyObject[] x) { - super(x); - } - - PyObject op_setitem(PyObject index, PyObject o) { - if (index instanceof PyInt) { - int i = ((PyInt) index).intval; - items[i] = o; - return null; - } - throw new TypeError(); - } - - PyObject op_add(PyObject other) { - if (other instanceof PyList) { - PyObject[] items2 = ((PyList) other).items; - PyObject[] nitems = new PyObject[items.length + items2.length]; - System.arraycopy(items, 0, nitems, 0, items.length); - System.arraycopy(items2, 0, nitems, items.length, items2.length); - return new PyList(nitems); - } - throw new TypeError(); - } - - PyObject op_inplace_add(PyObject other) { - if (other instanceof PySequence) { - PyObject[] items2 = ((PySequence) other).items; - PyObject[] 
nitems = new PyObject[items.length + items2.length]; - System.arraycopy(items, 0, nitems, 0, items.length); - System.arraycopy(items2, 0, nitems, items.length, items2.length); - items = nitems; - return this; - } - throw new TypeError(); - } - - PyObject op_mul(PyObject other) { - if (items.length == 1 && other instanceof PyInt) { - int i, count = ((PyInt) other).intval; - PyObject item = items[0]; - PyObject[] nitems = new PyObject[count]; - for (i=0; i<count; i++) - nitems[i] = item; - return new PyList(nitems); - } - throw new TypeError(); - } - - PyObject op_inplace_mul(PyObject other) { - if (items.length == 1 && other instanceof PyInt) { - int i, count = ((PyInt) other).intval; - PyObject item = items[0]; - PyObject[] nitems = new PyObject[count]; - for (i=0; i<count; i++) - nitems[i] = item; - items = nitems; - return this; - } - throw new TypeError(); - } - - PyObject meth_append(PyObject item) { - PyObject[] nitems = new PyObject[items.length + 1]; - System.arraycopy(items, 0, nitems, 0, items.length); - nitems[items.length] = item; - items = nitems; - return null; - } - -}; diff --git a/pypy/translator/java/PyObject.java b/pypy/translator/java/PyObject.java deleted file mode 100644 index 7f02df708e..0000000000 --- a/pypy/translator/java/PyObject.java +++ /dev/null @@ -1,46 +0,0 @@ - - -class PyObject { - - static PyBool Py_False = new PyBool(false); - static PyBool Py_True = new PyBool(true); - - boolean is_true() { return true; } - PyObject op_is_true() { return new PyBool(is_true()); } - - boolean eq(PyObject other) { return this == other; } - boolean lt(PyObject other) { throw new TypeError(); } - PyObject op_lt(PyObject other) { return new PyBool(lt(other)); } - PyObject op_le(PyObject other) { return new PyBool(!other.lt(this)); } - PyObject op_eq(PyObject other) { return new PyBool(eq(other)); } - PyObject op_ne(PyObject other) { return new PyBool(!eq(other)); } - PyObject op_gt(PyObject other) { return new PyBool(other.lt(this)); } - PyObject 
op_ge(PyObject other) { return new PyBool(!lt(other)); } - - PyObject op_neg() { throw new TypeError(); } - PyObject op_add(PyObject other) { throw new TypeError(); } - PyObject op_sub(PyObject other) { throw new TypeError(); } - PyObject op_mul(PyObject other) { throw new TypeError(); } - PyObject op_inplace_add(PyObject other) { return op_add(other); } - PyObject op_inplace_sub(PyObject other) { return op_sub(other); } - PyObject op_inplace_mul(PyObject other) { return op_mul(other); } - - int len() { throw new TypeError(); } - PyObject op_getitem(PyObject index) { throw new TypeError(); } - PyObject op_setitem(PyObject index, PyObject o) { throw new TypeError(); } - PyObject[] unpack(int expected_length, PyObject[] defaults) { - throw new TypeError(); - } - - PyObject getattr(String attr) { throw new TypeError(); } - PyObject op_getattr(PyObject attr) { - return getattr(((PyString) attr).string); - } - - /* CUT HERE -- automatically generated code below this line */ - PyObject meth_append(PyObject x) { - return getattr("append").op_simple_call(x); - } - PyObject op_simple_call() { throw new TypeError(); } - PyObject op_simple_call(PyObject x) { throw new TypeError(); } -}; diff --git a/pypy/translator/java/PySequence.java b/pypy/translator/java/PySequence.java deleted file mode 100644 index 389bb206c7..0000000000 --- a/pypy/translator/java/PySequence.java +++ /dev/null @@ -1,41 +0,0 @@ - - -class PySequence extends PyObject { - - PyObject[] items; - - PySequence(PyObject[] x) { - items = x; - } - - boolean is_true() { - return items.length != 0; - } - - int len() { - return items.length; - } - - PyObject op_getitem(PyObject index) { - if (index instanceof PyInt) { - int i = ((PyInt) index).intval; - return items[i]; - } - throw new TypeError(); - } - - PyObject[] unpack(int expected_length, PyObject[] defaults) { - int missing = expected_length - items.length; - if (missing == 0) - return items; - if (defaults == null || missing < 0 || missing > defaults.length) 
{ - throw new TypeError(); - } - PyObject[] result = new PyObject[expected_length]; - System.arraycopy(items, 0, result, 0, items.length); - System.arraycopy(defaults, defaults.length-missing, - result, items.length, missing); - return result; - } - -}; diff --git a/pypy/translator/java/PyString.java b/pypy/translator/java/PyString.java deleted file mode 100644 index eb4895bfa9..0000000000 --- a/pypy/translator/java/PyString.java +++ /dev/null @@ -1,48 +0,0 @@ - - -class PyString extends PyObject { - - String string; - - PyString(String data) { - string = data; - } - - boolean is_true() { - return string.length() != 0; - } - - int len() { - return string.length(); - } - - boolean eq(PyObject other) { - if (other instanceof PyString) - return string.equals(((PyString) other).string); - else - return false; - } - - boolean lt(PyObject other) { - if (other instanceof PyString) - return string.compareTo(((PyString) other).string) < 0; - else - throw new TypeError(); - } - - PyObject op_getitem(PyObject index) { - if (index instanceof PyInt) { - int i = ((PyInt) index).intval; - return new PyString(string.substring(i, i+1)); - } - throw new TypeError(); - } - - PyObject op_add(PyObject other) { - if (other instanceof PyString) { - return new PyString(string.concat(((PyString) other).string)); - } - throw new TypeError(); - } - -}; diff --git a/pypy/translator/java/PyTuple.java b/pypy/translator/java/PyTuple.java deleted file mode 100644 index 947a1a453a..0000000000 --- a/pypy/translator/java/PyTuple.java +++ /dev/null @@ -1,20 +0,0 @@ - - -class PyTuple extends PySequence { - - PyTuple(PyObject[] x) { - super(x); - } - - PyObject op_add(PyObject other) { - if (other instanceof PyTuple) { - PyObject[] items2 = ((PyTuple) other).items; - PyObject[] nitems = new PyObject[items.length + items2.length]; - System.arraycopy(items, 0, nitems, 0, items.length); - System.arraycopy(items2, 0, nitems, items.length, items2.length); - return new PyTuple(nitems); - } - throw new 
TypeError(); - } - -}; diff --git a/pypy/translator/java/TypeError.java b/pypy/translator/java/TypeError.java deleted file mode 100644 index a0f2566a77..0000000000 --- a/pypy/translator/java/TypeError.java +++ /dev/null @@ -1,5 +0,0 @@ - - -class TypeError extends RuntimeException { - -}; diff --git a/pypy/translator/java/__init__.py b/pypy/translator/java/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/translator/java/__init__.py +++ /dev/null diff --git a/pypy/translator/java/autopath.py b/pypy/translator/java/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/java/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/java/genjava.py b/pypy/translator/java/genjava.py deleted file mode 100644 index 1db8099e32..0000000000 --- a/pypy/translator/java/genjava.py +++ /dev/null @@ -1,600 +0,0 @@ -""" -Generate a Java source file from the flowmodel. 
- -""" - -import sys, os -from pypy.objspace.flow.model import traverse -from pypy.objspace.flow import FlowObjSpace -from pypy.objspace.flow.model import FunctionGraph, Block, Link, Variable, Constant -from pypy.objspace.flow.model import last_exception, last_exc_value, checkgraph -from pypy.translator.simplify import remove_direct_loops -from pypy.interpreter.error import OperationError -from types import FunctionType - -from pypy.translator.translator import Translator - - -def ordered_blocks(graph): - # collect all blocks - allblocks = [] - def visit(block): - if isinstance(block, Block): - # first we order by offset in the code string - if block.operations: - ofs = block.operations[0].offset - else: - ofs = sys.maxint - # then we order by input variable name or value - if block.inputargs: - txt = str(block.inputargs[0]) - else: - txt = "dummy" - allblocks.append((ofs, txt, block)) - traverse(visit, graph) - allblocks.sort() - #for ofs, txt, block in allblocks: - # print ofs, txt, block - return [block for ofs, txt, block in allblocks] - - -class GenJava: - def __init__(self, jdir, translator, modname=None): - self.jdir = jdir - self.translator = translator - self.modname = (modname or - translator.functions[0].__name__) - self.javanames = {Constant(None).key: 'null', - Constant(False).key: 'Py_False', - Constant(True).key: 'Py_True', - } - - self.seennames = {} - self.initcode = [] # list of lines for the module's initxxx() - self.latercode = [] # list of generators generating extra lines - # for later in initxxx() -- for recursive - # objects - self.globaldecl = [] - self.globalobjects = [] - self.pendingfunctions = [] - self.callpatterns = {} # for now, arities seen in simple_call() - - # special constructors: - self.has_listarg = { - 'newlist': 'PyList', - 'newtuple': 'PyTuple', - 'newdict': 'PyDict', - 'newstring':'PyString', - } - - self.nameof(self.translator.functions[0]) - self.gen_source() - - def gen_test_class(self, inputargs, expectedresult): - 
entrypoint = self.nameof(self.translator.functions[0]) - f = self.jdir.join('test.java').open('w') - print >> f, 'class test extends PyObject {' - print >> f, ' static public void main(String[] argv) {' - print >> f, ' PyObject result = %s.op_simple_call(%s);' % ( - entrypoint, ', '.join([self.nameof(x) for x in inputargs])) - print >> f, ' if (result.eq(%s))' % ( - self.nameof(expectedresult),) - print >> f, ' System.out.println("OK");' - print >> f, ' else' - print >> f, ' System.out.println("FAIL");' - print >> f, ' }' - self.gen_initcode(f) - print >> f, '};' - f.close() - - def nameof(self, obj): - key = Constant(obj).key - try: - return self.javanames[key] - except KeyError: - #name = "w(%s)" % str(obj) - #self.javanames[key] = name - #return name - if (type(obj).__module__ != '__builtin__' and - not isinstance(obj, type)): # skip user-defined metaclasses - # assume it's a user defined thingy - name = self.nameof_instance(obj) - else: - for cls in type(obj).__mro__: - meth = getattr(self, - 'nameof_' + cls.__name__.replace(' ', ''), - None) - if meth: - break - else: - raise Exception, "nameof(%r) in %r" % (obj, self.current_func) - name = meth(obj) - self.javanames[key] = name - return name - - def uniquename(self, basename): - n = self.seennames.get(basename, 0) - self.seennames[basename] = n+1 - if n == 0: - self.globalobjects.append(basename) - self.globaldecl.append('static PyObject *%s;' % (basename,)) - return basename - else: - return self.uniquename('%s_%d' % (basename, n)) - - def nameof_object(self, value): - if type(value) is not object: - raise Exception, "nameof(%r) in %r" % (value, self.current_func) - name = self.uniquename('g_object') - self.initcode.append('INITCHK(%s = PyObject_CallFunction((PyObject*)&PyBaseObject_Type, ""))'%name) - return name - - def nameof_module(self, value): - assert value is os or not hasattr(value, "__file__") or \ - not (value.__file__.endswith('.pyc') or - value.__file__.endswith('.py') or - 
value.__file__.endswith('.pyo')), \ - "%r is not a builtin module (probably :)"%value - name = self.uniquename('mod%s'%value.__name__) - self.initcode.append('INITCHK(%s = PyImport_ImportModule("%s"))'%(name, value.__name__)) - return name - - - def nameof_int(self, value): - return "new PyInt(%d)" % value - - def nameof_long(self, value): - # assume we want them in hex most of the time - if value < 256L: - return "%dL" % value - else: - return "0x%08xL" % value - - def nameof_float(self, value): - return "w(%s)" % value - - def nameof_str(self, value): - content = [] - for c in value: - if not (' ' <= c < '\x7f'): - c = '\\%03o' % ord(c) - content.append(c) - return 'new PyString("%s")' % (''.join(content),) - - def skipped_function(self, func): - # debugging only! Generates a placeholder for missing functions - # that raises an exception when called. - name = self.uniquename('gskippedfunc_' + func.__name__) - self.globaldecl.append('static PyMethodDef ml_%s = { "%s", &skipped, METH_VARARGS };' % (name, name)) - self.initcode.append('INITCHK(%s = PyCFunction_New(' - '&ml_%s, NULL))' % (name, name)) - self.initcode.append('\tPy_INCREF(%s);' % name) - self.initcode.append('\tPyCFunction_GET_SELF(%s) = %s;' % (name, name)) - return name - - def nameof_function(self, func): - printable_name = '(%s:%d) %s' % ( - func.func_globals.get('__name__', '?'), - func.func_code.co_firstlineno, - func.__name__) - if self.translator.frozen: - if func not in self.translator.flowgraphs: - print "NOT GENERATING", printable_name - return self.skipped_function(func) - else: - if (func.func_doc and - func.func_doc.lstrip().startswith('NOT_RPYTHON')): - print "skipped", printable_name - return self.skipped_function(func) - name = self.uniquename('gfunc_' + func.__name__) - self.initcode.append('static PyObject %s = new C_%s();' % (name, name)) - self.pendingfunctions.append(func) - return name - - def nameof_staticmethod(self, sm): - # XXX XXX XXXX - func = sm.__get__(42.5) - name = 
self.uniquename('gsm_' + func.__name__) - functionname = self.nameof(func) - self.initcode.append('INITCHK(%s = PyCFunction_New(' - '&ml_%s, NULL))' % (name, functionname)) - return name - - def nameof_instancemethod(self, meth): - if meth.im_self is None: - # no error checking here - return self.nameof(meth.im_func) - else: - ob = self.nameof(meth.im_self) - func = self.nameof(meth.im_func) - typ = self.nameof(meth.im_class) - name = self.uniquename('gmeth_'+meth.im_func.__name__) - self.initcode.append( - 'INITCHK(%s = gencfunc_descr_get(%s, %s, %s))'%( - name, func, ob, typ)) - return name - - def should_translate_attr(self, pbc, attr): - ann = self.translator.annotator - if ann is None: - ignore = getattr(pbc.__class__, 'NOT_RPYTHON_ATTRIBUTES', []) - if attr in ignore: - return False - else: - return "probably" # True - if attr in ann.getpbcattrs(pbc): - return True - classdef = ann.getuserclasses().get(pbc.__class__) - if (classdef and - classdef.about_attribute(attr) != annmodel.SomeImpossibleValue()): - return True - return False - - def later(self, gen): - self.latercode.append(gen) - - def nameof_instance(self, instance): - name = self.uniquename('ginst_' + instance.__class__.__name__) - cls = self.nameof(instance.__class__) - def initinstance(): - content = instance.__dict__.items() - content.sort() - for key, value in content: - if self.should_translate_attr(instance, key): - yield 'INITCHK(SETUP_INSTANCE_ATTR(%s, "%s", %s))' % ( - name, key, self.nameof(value)) - self.initcode.append('INITCHK(SETUP_INSTANCE(%s, %s))' % ( - name, cls)) - self.later(initinstance()) - return name - - def nameof_builtin_function_or_method(self, func): - if func.__self__ is None: - # builtin function - return "Builtin.%s" % func.__name__ - else: - # builtin (bound) method - assert False, "to do" - - def nameof_classobj(self, cls): - if cls.__doc__ and cls.__doc__.lstrip().startswith('NOT_RPYTHON'): - raise Exception, "%r should never be reached" % (cls,) - - metaclass = 
"&PyType_Type" - if issubclass(cls, Exception): - if cls.__module__ == 'exceptions': - return 'w(%s)'%cls.__name__ - #else: - # # exceptions must be old-style classes (grr!) - # metaclass = "&PyClass_Type" - # For the moment, use old-style classes exactly when the - # pypy source uses old-style classes, to avoid strange problems. - if not isinstance(cls, type): - assert type(cls) is type(Exception) - metaclass = "&PyClass_Type" - - name = self.uniquename('gcls_' + cls.__name__) - basenames = [self.nameof(base) for base in cls.__bases__] - def initclassobj(): - content = cls.__dict__.items() - content.sort() - for key, value in content: - if key.startswith('__'): - if key in ['__module__', '__doc__', '__dict__', - '__weakref__', '__repr__', '__metaclass__']: - continue - # XXX some __NAMES__ are important... nicer solution sought - #raise Exception, "unexpected name %r in class %s"%(key, cls) - if isinstance(value, staticmethod) and value.__get__(1) not in self.translator.flowgraphs and self.translator.frozen: - print value - continue - if isinstance(value, FunctionType) and value not in self.translator.flowgraphs and self.translator.frozen: - print value - continue - - yield 'INITCHK(SETUP_CLASS_ATTR(%s, "%s", %s))' % ( - name, key, self.nameof(value)) - - baseargs = ", ".join(basenames) - if baseargs: - baseargs = ', '+baseargs - self.initcode.append('INITCHK(%s = PyObject_CallFunction((PyObject*) %s,' - %(name, metaclass)) - self.initcode.append('\t\t"s(%s){}", "%s"%s))' - %("O"*len(basenames), cls.__name__, baseargs)) - - self.later(initclassobj()) - return name - - nameof_class = nameof_classobj # for Python 2.2 - - - def nameof_type(self, cls): - return "w(%s)" % cls.__name__ ##?? 
- if cls in self.typename_mapping: - return '(PyObject*) %s' % self.typename_mapping[cls] - assert cls.__module__ != '__builtin__', \ - "built-in class %r not found in typename_mapping" % (cls,) - return self.nameof_classobj(cls) - - def nameof_tuple(self, tup): - name = self.uniquename('g%dtuple' % len(tup)) - args = [self.nameof(x) for x in tup] - args.insert(0, '%d' % len(tup)) - args = ', '.join(args) - self.initcode.append('INITCHK(%s = PyTuple_Pack(%s))' % (name, args)) - return name - - def nameof_list(self, lis): - name = self.uniquename('g%dlist' % len(lis)) - def initlist(): - for i in range(len(lis)): - item = self.nameof(lis[i]) - yield '\tPy_INCREF(%s);' % item - yield '\tPyList_SET_ITEM(%s, %d, %s);' % (name, i, item) - self.initcode.append('INITCHK(%s = PyList_New(%d))' % (name, len(lis))) - self.later(initlist()) - return name - - def nameof_dict(self, dic): - return 'space.newdict([w("sorry", "not yet"])' - assert dic is not __builtins__ - assert '__builtins__' not in dic, 'Seems to be the globals of %s' % ( - dic.get('__name__', '?'),) - name = self.uniquename('g%ddict' % len(dic)) - def initdict(): - for k in dic: - if type(k) is str: - yield ('\tINITCHK(PyDict_SetItemString' - '(%s, "%s", %s) >= 0)'%( - name, k, self.nameof(dic[k]))) - else: - yield ('\tINITCHK(PyDict_SetItem' - '(%s, %s, %s) >= 0)'%( - name, self.nameof(k), self.nameof(dic[k]))) - self.initcode.append('INITCHK(%s = PyDict_New())' % (name,)) - self.later(initdict()) - return name - - # strange prebuilt instances below, don't look too closely - # XXX oh well. 
- def nameof_member_descriptor(self, md): - name = self.uniquename('gdescriptor_%s_%s' % ( - md.__objclass__.__name__, md.__name__)) - cls = self.nameof(md.__objclass__) - self.initcode.append('INITCHK(PyType_Ready((PyTypeObject*) %s) >= 0)' % - cls) - self.initcode.append('INITCHK(%s = PyMapping_GetItemString(' - '((PyTypeObject*) %s)->tp_dict, "%s"))' % - (name, cls, md.__name__)) - return name - nameof_getset_descriptor = nameof_member_descriptor - nameof_method_descriptor = nameof_member_descriptor - nameof_wrapper_descriptor = nameof_member_descriptor - - def nameof_file(self, fil): - if fil is sys.stdin: - return 'PySys_GetObject("stdin")' - if fil is sys.stdout: - return 'PySys_GetObject("stdout")' - if fil is sys.stderr: - return 'PySys_GetObject("stderr")' - raise Exception, 'Cannot translate an already-open file: %r' % (fil,) - - def gen_source(self): -## info = { -## 'modname': self.modname, -## 'entrypointname': self.translator.functions[0].__name__, -## 'entrypoint': self.nameof(self.translator.functions[0]), -## } - - # function implementations - while self.pendingfunctions: - func = self.current_func = self.pendingfunctions.pop() - self.gen_javafunction(func) - # collect more of the latercode after each function - self.collect_latercode() - #self.gen_global_declarations() - - # copy over the PyXxx classes - mydir = os.path.dirname(__file__) - for fn in os.listdir(mydir): - if fn.lower().endswith('.java'): - g = open(os.path.join(mydir, fn), 'r') - f = self.jdir.join(fn).open('w') - data = g.read() - i = data.find('/* CUT HERE') - if i < 0: - f.write(data) - else: - f.write(data[:i]) - print >> f - print >> f, ' /* call patterns */' - arities = self.callpatterns.keys() - arities.sort() - for arity in arities: - args = ['PyObject x%d' % i for i in range(arity)] - print >> f, (' PyObject op_simple_call(%s) { throw ' - 'new TypeError(); }' % ', '.join(args)) - print >> f - self.gen_initcode(f) - print >> f, '};' - f.close() - g.close() - - def 
collect_latercode(self): - while self.latercode: - gen = self.latercode.pop() - #self.initcode.extend(gen) -- eats TypeError! bad CPython! - for line in gen: - self.initcode.append(line) - - def gen_initcode(self, f): - self.collect_latercode() - print >> f, ' /* init code */' - for line in self.initcode: - print >> f, ' ' + line - print >> f - self.initcode = [] - - def gen_javafunction(self, func): - t = self.translator - #t.simplify(func) - graph = t.getflowgraph(func) - remove_direct_loops(graph) - checkgraph(graph) - - name = self.nameof(func) - f = self.jdir.join('C_%s.java' % name).open('w') - try: - src = graph.source - except AttributeError: - pass - else: - print >> f, '//' - for line in src.rstrip().split('\n'): - print >> f, '// ', line - print >> f, '//' - print >> f - print >> f, 'class C_%s extends PyObject {' % name - print >> f - - def expr(v): - if isinstance(v, Variable): - return v.name - elif isinstance(v, Constant): - return self.nameof(v.value) - else: - raise TypeError, "expr(%r)" % (v,) - - def arglist(args, prefix=''): - res = [prefix + expr(arg) for arg in args] - return ", ".join(res) - - def oper(op): - if op.opname in self.has_listarg: - fmt = "PyObject[] a_%s = {%s}; PyObject %s = new %s(a_%s);" - v = expr(op.result) - return fmt % (v, arglist(op.args), v, - self.has_listarg[op.opname], v) - else: - fmt = "PyObject %s = %s.op_%s(%s);" - return fmt % (expr(op.result), expr(op.args[0]), - op.opname, arglist(op.args[1:])) - - def gen_link(link, linklocalvars={}): - "Generate the code to jump across the given Link." 
- for a1, a2 in zip(link.args, link.target.inputargs): - if a1 in linklocalvars: - src = linklocalvars[a1] - else: - src = expr(a1) - yield "%s = %s;" % (expr(a2), src) - goto = blocknum[link.target] - if goto == blocknum[block]+1: - yield '// falls through' - else: - yield 'block = %d;' % goto - yield 'continue;' - - start = graph.startblock - blocks = ordered_blocks(graph) - nblocks = len(blocks) - - blocknum = {} - localvars = [] - for block in blocks: - blocknum[block] = len(blocknum)+1 - if block is not start: - localvars += block.inputargs - - # create function declaration - args = arglist(start.inputargs, prefix='PyObject ') - self.callpatterns[len(start.inputargs)] = True - print >> f, " PyObject op_simple_call(%s) {" % args - for v in localvars: - print >> f, " PyObject %s = null;" % v - print >> f, " int block = %d; // startblock" % blocknum[start] - print >> f, " while (true) switch (block) {" - - def render_block(block): - catch_exception = block.exitswitch == Constant(last_exception) - regular_op = len(block.operations) - catch_exception - # render all but maybe the last op - for op in block.operations[:regular_op]: - yield "%s" % oper(op) - # render the last op if it is exception handled - for op in block.operations[regular_op:]: - yield "try {" - yield " %s" % oper(op) - - if len(block.exits) == 0: - if len(block.inputargs) == 2: # exc_cls, exc_value - # exceptional return block - exc_cls = expr(block.inputargs[0]) - exc_val = expr(block.inputargs[1]) - XXX - yield "raise OperationError(%s, %s)" % (exc_cls, exc_val) - else: - # regular return block - retval = expr(block.inputargs[0]) - yield "return %s;" % retval - return - elif block.exitswitch is None: - # single-exit block - assert len(block.exits) == 1 - for op in gen_link(block.exits[0]): - yield "%s" % op - elif catch_exception: - XXX - # block catching the exceptions raised by its last operation - # we handle the non-exceptional case first - link = block.exits[0] - assert link.exitcase is None 
- for op in gen_link(link): - yield " %s" % op - # we must catch the exception raised by the last operation, - # which goes to the last err%d_%d label written above. - for link in block.exits[1:]: - assert issubclass(link.exitcase, Exception) - yield "except OperationError, e:" - print "*"*10, link.exitcase - for op in gen_link(link, { - Constant(last_exception): 'e.w_type', - Constant(last_exc_value): 'e.w_value'}): - yield " %s" % op - else: - # block ending in a switch on a value - exits = list(block.exits) - if len(exits) == 2 and ( - exits[0].exitcase is False and exits[1].exitcase is True): - # order these guys like Python does - exits.reverse() - q = "if" - for link in exits[:-1]: - yield "%s (%s.eq(%s)) {" % (q, expr(block.exitswitch), - self.nameof(link.exitcase)) - for op in gen_link(link): - yield " %s" % op - yield "}" - q = "else if" - link = exits[-1] - yield "else {" - yield " // assert %s.eq(%s)" % (expr(block.exitswitch), - self.nameof(link.exitcase)) - for op in gen_link(exits[-1]): - yield " %s" % op - yield "}" - - for block in blocks: - blockno = blocknum[block] - print >> f - print >> f, " case %d:" % blockno - for line in render_block(block): - print >> f, " %s" % line - - print >> f, " }" - print >> f, " }" - print >> f - print >> f, "};" - f.close() diff --git a/pypy/translator/java/test/__init__.py b/pypy/translator/java/test/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/translator/java/test/__init__.py +++ /dev/null diff --git a/pypy/translator/java/test/autopath.py b/pypy/translator/java/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/java/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. 
-The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/java/test/test_javatrans.py b/pypy/translator/java/test/test_javatrans.py deleted file mode 100644 index 4e825b3094..0000000000 --- a/pypy/translator/java/test/test_javatrans.py +++ /dev/null @@ -1,70 +0,0 @@ -import autopath, os -import py -from py.process import cmdexec -from pypy.tool.udir import udir -from pypy.translator.java.genjava import GenJava -from pypy.translator.test import snippet -from pypy.translator.translator import Translator - - -def setup_module(mod): - try: - py.path.local.sysfind('javac') - except py.error.ENOENT: - py.test.skip("javac not found") - - -class TestNoTypeCGenTestCase: - - objspacename = 'flow' - - def build_jfunc(self, func): - try: func = func.im_func - except AttributeError: pass - t = Translator(func) - t.simplify() - name = func.func_name - self.jdir = udir.mkdir(name) - self.gen = GenJava(self.jdir, t) - - def check(self, inputargs, expectedresult): - self.gen.gen_test_class(inputargs, expectedresult) 
- cwd = os.getcwd() - try: - os.chdir(str(self.jdir)) - cmdexec('javac *.java') - assert cmdexec('java test').strip() == 'OK' - finally: - os.chdir(cwd) - - def test_simple_func(self): - self.build_jfunc(snippet.simple_func) - self.check([123], 124) - - def test_if_then_else(self): - self.build_jfunc(snippet.if_then_else) - self.check([2,3,4], 3) - self.check([0,3,4], 4) - self.check([-1,3,4], 3) - - def test_two_plus_two(self): - self.build_jfunc(snippet.two_plus_two) - self.check([], 4) - - def test_sieve_of_eratosthenes(self): - self.build_jfunc(snippet.sieve_of_eratosthenes) - self.check([], 1028) - - def test_string(self): - def tstring(name): - return len("hello " + name) - self.build_jfunc(tstring) - self.check(["world"], 11) - - def DONT_test_list_append(self): - def tlistappend(): - l = [] - l.append(5) - return l[0] - self.build_jfunc(tlistappend) - self.check([], 5) diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py deleted file mode 100644 index 50c8bb860b..0000000000 --- a/pypy/translator/simplify.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Flow Graph Simplification -""" - -from pypy.objspace.flow.model import * - -def eliminate_empty_blocks(graph): - """Eliminate basic blocks that do not contain any operations. - When this happens, we need to replace the preceeding link with the - following link. 
Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): - while not link.target.operations and len(link.target.exits) == 1: - assert link.target is not link.prevblock, ( - "the graph contains an empty infinite loop") - block1 = link.target - exit = block1.exits[0] - outputargs = [] - for v in exit.args: - if isinstance(v, Variable): - # this variable is valid in the context of block1 - # but it must come from 'link' - i = block1.inputargs.index(v) - v = link.args[i] - outputargs.append(v) - link.args = outputargs - link.target = exit.target - # the while loop above will simplify recursively the new link - traverse(visit, graph) - -def join_blocks(graph): - """Links can be deleted if they are the single exit of a block and - the single entry point of the next block. When this happens, we can - append all the operations of the following block to the preceeding - block (but renaming variables with the appropriate arguments.) - """ - entrymap = mkentrymap(graph) - - def visit(link): - if isinstance(link, Link): - if (len(link.prevblock.exits) == 1 and - len(entrymap[link.target]) == 1 and - link.target.exits): # stop at the returnblock - renaming = {} - for vprev, vtarg in zip(link.args, link.target.inputargs): - renaming[vtarg] = vprev - def rename(v): - return renaming.get(v, v) - for op in link.target.operations: - args = [rename(a) for a in op.args] - op = SpaceOperation(op.opname, args, rename(op.result)) - link.prevblock.operations.append(op) - exits = [] - for exit in link.target.exits: - args = [rename(a) for a in exit.args] - exits.append(Link(args, exit.target, exit.exitcase)) - link.prevblock.exitswitch = rename(link.target.exitswitch) - link.prevblock.recloseblock(*exits) - # simplify recursively the new links - for exit in exits: - visit(exit) - traverse(visit, graph) - -def remove_implicit_exceptions(graph): - """An exception raised implicitely has a particular value of - space.wrap(last_exception) -- see 
pypy.objspace.flow.objspace.make_op -- - which shows up in the flow graph if the exception is not caught. This - function removes such exceptions entierely. This gets rid for example - of possible IndexErrors by 'getitem', assuming they cannot happen unless - there is an exception handler in the same function.""" - def visit(link): - if isinstance(link, Link) and link in link.prevblock.exits: - if (link.target is graph.exceptblock and - link.prevblock.exitswitch == Constant(last_exception) and - isinstance(link.exitcase, type(Exception)) and - issubclass(link.exitcase, Exception) and - len(link.args) == 2 and - link.args[1] == Constant(last_exc_value) and - link.args[0] in [Constant(last_exception), - Constant(link.exitcase)]): - # remove the link - lst = list(link.prevblock.exits) - lst.remove(link) - link.prevblock.exits = tuple(lst) - if len(lst) <= 1: - link.prevblock.exitswitch = None - traverse(visit, graph) - -def simplify_graph(graph): - """inplace-apply all the existing optimisations to the graph.""" - checkgraph(graph) - eliminate_empty_blocks(graph) - remove_implicit_exceptions(graph) - join_blocks(graph) - checkgraph(graph) - -def remove_direct_loops(graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - # insert an empty block with fresh variables. 
- intermediate = [Variable() for a in link.args] - b = Block(intermediate) - b.closeblock(Link(intermediate, link.target)) - link.target = b - traverse(visit, graph) diff --git a/pypy/translator/test/__init__.py b/pypy/translator/test/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/pypy/translator/test/__init__.py +++ /dev/null diff --git a/pypy/translator/test/autopath.py b/pypy/translator/test/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. 
If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/test/run_snippet.py b/pypy/translator/test/run_snippet.py deleted file mode 100644 index d08bb3fb5f..0000000000 --- a/pypy/translator/test/run_snippet.py +++ /dev/null @@ -1,106 +0,0 @@ -""" - - Use all functions in snippet to test translation to pyrex - -""" -import autopath -import traceback -import sys -from pypy.translator.translator import Translator - -from pypy.translator.test import snippet - -class Result: - def __init__(self, func, argtypes): - self.func = func - self.argtypes = argtypes - self.r_flow = self.r_annotate = self.r_compile = None - for name in 'flow', 'annotate', 'compile': - method = getattr(self, name) - resname = 'r_' + name - try: - method() - except (KeyboardInterrupt, SystemExit): - raise - except: - self.excinfo = sys.exc_info() - setattr(self, resname, False) - else: - setattr(self, resname, True) - - def flow(self): - self.translator = Translator(func) - self.translator.simplify() - - def annotate(self): - 
self.translator.annotate(self.argtypes) - - def compile(self): - compiled_function = self.translator.compile() - return compiled_function - -def collect_functions(module, specnamelist): - l = [] - for funcname, value in vars(module).items(): - if not hasattr(value, 'func_code'): - continue - for specname in specnamelist: - if funcname.startswith(specname): - l.append(value) - break - if not specnamelist: - l.append(value) - return l - - -def combine(lists): - if not lists: - yield [] - else: - head = lists[0] - if not isinstance(head, tuple): - head = (head,) - tail = lists[1:] - for item in head: - for com in combine(tail): - yield [item] + com - -def get_arg_types(func): - # func_defaults e.g.: ([int, float], [str, int], int) - if func.func_defaults: - for argtypeslist in combine(func.func_defaults): - yield argtypeslist - else: - yield [] - -# format string for result-lines -format_str = "%-30s %10s %10s %10s" - -def repr_result(res): - name = res.func.func_name - argtypes = res.argtypes - funccall = "%s(%s)" % (name, ", ".join([x.__name__ for x in argtypes])) - flow = res.r_flow and 'ok' or 'fail' - ann = res.r_annotate and 'ok' or 'fail' - comp = res.r_compile and 'ok' or 'fail' - return format_str %(funccall, flow, ann, comp) - -if __name__=='__main__': - specnamelist = sys.argv[1:] - funcs = collect_functions(snippet, specnamelist) - results = [] - print format_str %("functioncall", "flowed", "annotated", "compiled") - for func in funcs: - for argtypeslist in get_arg_types(func): - #print "trying %s %s" %(func, argtypeslist) - result = Result(func, argtypeslist) - results.append(result) - print repr_result(result) - if specnamelist and getattr(result, 'excinfo', None): - traceback.print_exception(*result.excinfo) - raise SystemExit, 1 - - #for res in results: - # print repr_result(res) - - diff --git a/pypy/translator/test/snippet.py b/pypy/translator/test/snippet.py deleted file mode 100644 index 8d42adbf0a..0000000000 --- a/pypy/translator/test/snippet.py 
+++ /dev/null @@ -1,657 +0,0 @@ -"""Snippets for translation - -This module holds various snippets, to be used by translator -unittests. - -TODO, or sanxiyn's plan: - -Each snippet should know about suitable arguments to test it. -(Otherwise, there's a duplcation!) Should the result also be -stored? It can computed by CPython if we don't store it. - -In case of typed translation test, we can give input_arg_types -by actually trying type() on arguments. - -Each unittest should define a list of functions which it is able -to translate correctly, and import the list for tests. When -a translator can handle more, simply adding a function to the -list should suffice. - -But for now, none of the above applies. -""" - -# we define the starting types in the snippet -# function's default arguments. the following -# definitions denote to the "test-generator" -# the possible types that can be passed to -# the specific snippet. -numtype = (int, float, ) -anytype = (int, float, str) -seqtype = (list, tuple) - -def if_then_else(cond=anytype, x=anytype, y=anytype): - if cond: - return x - else: - return y - -def my_gcd(a=numtype, b=numtype): - r = a % b - while r: - a = b - b = r - r = a % b - return b - -def is_perfect_number(n=int): - div = 1 - sum = 0 - while div < n: - if n % div == 0: - sum += div - div += 1 - return n == sum - -def my_bool(x=int): - return not not x - -def two_plus_two(): - """Array test""" - array = [0] * 3 - array[0] = 2 - array[1] = 2 - array[2] = array[0] + array[1] - return array[2] - -def get_set_del_slice(l=list): - del l[:1] - del l[-1:] - del l[2:4] - l[:1] = [3] - l[-1:] = [9] - l[2:4] = [8,11] - return l[:2], l[5:], l[3:5] - -def sieve_of_eratosthenes(): - """Sieve of Eratosthenes - - This one is from an infamous benchmark, "The Great Computer - Language Shootout". 
- - URL is: http://www.bagley.org/~doug/shootout/ - """ - flags = [True] * (8192+1) - count = 0 - i = 2 - while i <= 8192: - if flags[i]: - k = i + i - while k <= 8192: - flags[k] = False - k = k + i - count = count + 1 - i = i + 1 - return count - -def simple_func(i=numtype): - return i + 1 - -def while_func(i=numtype): - total = 0 - while i > 0: - total = total + i - i = i - 1 - return total - -def nested_whiles(i=int, j=int): - s = '' - z = 5 - while z > 0: - z = z - 1 - u = i - while u < j: - u = u + 1 - s = s + '.' - s = s + '!' - return s - -def poor_man_range(i=int): - lst = [] - while i > 0: - i = i - 1 - lst.append(i) - lst.reverse() - return lst - -def poor_man_rev_range(i=int): - lst = [] - while i > 0: - i = i - 1 - lst += [i] - return lst - -def simple_id(x=anytype): - return x - -def branch_id(cond=anytype, a=anytype, b=anytype): - while 1: - if cond: - return a - else: - return b - -def builtinusage(): - return pow(2, 2) - -def yast(lst=seqtype): - total = 0 - for z in lst: - total = total + z - return total - -def time_waster(n=int): - """Arbitrary test function""" - i = 0 - x = 1 - while i < n: - j = 0 - while j <= i: - j = j + 1 - x = x + (i & j) - i = i + 1 - return x - -def half_of_n(n=int): - """Slice test""" - i = 0 - lst = range(n) - while lst: - lst = lst[1:-1] - i = i + 1 - return i - -def int_id(x=int): - i = 0 - while i < x: - i = i + 1 - return i - -def greet(target=str): - """String test""" - hello = "hello" - return hello + target - -def choose_last(): - """For loop test""" - set = ["foo", "bar", "spam", "egg", "python"] - for choice in set: - pass - return choice - -def poly_branch(x=int): - if x: - y = [1,2,3] - else: - y = ['a','b','c'] - - z = y - return z*2 - -def s_and(x=anytype, y=anytype): - if x and y: - return 'yes' - else: - return 'no' - -def break_continue(x=numtype): - result = [] - i = 0 - while 1: - i = i + 1 - try: - if i&1: - continue - if i >= x: - break - finally: - result.append(i) - i = i + 1 - return result - 
-def reverse_3(lst=seqtype): - try: - a, b, c = lst - except: - return 0, 0, 0 - return c, b, a - -def finallys(lst=seqtype): - x = 1 - try: - x = 2 - try: - x = 3 - a, = lst - x = 4 - except KeyError: - return 5 - except ValueError: - return 6 - b, = lst - x = 7 - finally: - x = 8 - return x - -def finally2(o, k): - try: - o[k] += 1 - finally: - o[-1] = 'done' - -def bare_raise(o, ignore): - try: - return o[5] - except: - if not ignore: - raise - -def factorial(n=int): - if n <= 1: - return 1 - else: - return n * factorial(n-1) - -def factorial2(n=int): # analysed in a different order - if n > 1: - return n * factorial2(n-1) - else: - return 1 - -def _append_five(lst): - lst += [5] - -def call_five(): - a = [] - _append_five(a) - return a - -def _append_six(lst): - lst += [6] - -def call_five_six(): - a = [] - _append_five(a) - _append_six(a) - return a - -def call_unpack_56(): - a = call_five_six() - return len(a), a[0], a[1] - -def forty_two(): - return 42 - -def never_called(): - return "booo" - -def constant_result(): - if forty_two(): - return "yadda" - else: - return never_called() - -class CallablePrebuiltConstant(object): - def __call__(self): - return 42 - -callable_prebuilt_constant = CallablePrebuiltConstant() - -def call_cpbc(): - return callable_prebuilt_constant() - - -# INHERITANCE / CLASS TESTS -class C(object): pass - -def build_instance(): - c = C() - return c - -def set_attr(): - c = C() - c.a = 1 - c.a = 2 - return c.a - -def merge_setattr(x): - if x: - c = C() - c.a = 1 - else: - c = C() - return c.a - -class D(C): pass -class E(C): pass - -def inheritance1(): - d = D() - d.stuff = () - e = E() - e.stuff = -12 - e.stuff = 3 - lst = [d, e] - return d.stuff, e.stuff - - -def inheritance2(): - d = D() - d.stuff = (-12, -12) - e = E() - e.stuff = (3, "world") - return _getstuff(d), _getstuff(e) - -class F: - pass -class G(F): - def m(self, x): - return self.m2(x) - def m2(self, x): - return D(), x -class H(F): - def m(self, y): - self.attr = 1 - 
return E(), y - -def knownkeysdict(b=anytype): - if b: - d = {'a': 0} - d['b'] = b - d['c'] = 'world' - else: - d = {'b': -123} - return d['b'] - -def generaldict(key=str, value=int, key2=str, value2=int): - d = {key: value} - d[key2] = value2 - return d[key or key2] - -def prime(n=int): - return len([i for i in range(1,n+1) if n%i==0]) == 2 - -class A0: - pass -class A1(A0): - clsattr = 123 -class A2(A1): - clsattr = 456 -class A3(A2): - clsattr = 789 -class A4(A3): - pass -class A5(A0): - clsattr = 101112 - -def classattribute(flag=int): - if flag == 1: - x = A1() - elif flag == 2: - x = A2() - elif flag == 3: - x = A3() - elif flag == 4: - x = A4() - else: - x = A5() - return x.clsattr - - -class Z: - def my_method(self): - return self.my_attribute - -class WithInit: - def __init__(self, n): - self.a = n - -class WithMoreInit(WithInit): - def __init__(self, n, m): - WithInit.__init__(self, n) - self.b = m - -def simple_method(v=anytype): - z = Z() - z.my_attribute = v - return z.my_method() - -def with_init(v=int): - z = WithInit(v) - return z.a - -def with_more_init(v=int, w=bool): - z = WithMoreInit(v, w) - if z.b: - return z.a - else: - return -z.a - -global_z = Z() -global_z.my_attribute = 42 - -def global_instance(): - return global_z.my_method() - -def call_Z_my_method(z): - return z.my_method - -def somepbc_simplify(): - z = Z() - call_Z_my_method(global_z) - call_Z_my_method(z) - - -global_c = C() -global_c.a = 1 - -def global_newstyle_instance(): - return global_c - -global_rl = [] -global_rl.append(global_rl) - -def global_recursive_list(): - return global_rl - -class BadInit(object): - def update(self, k): - self.k = 1 - def __init__(self, v): - self.update(**{'k':v}) - def read(self): - return self.k - -global_bi = BadInit(1) - -class MI_A(object): - a = 1 -class MI_B(MI_A): - b = 2 -class MI_C(MI_A): - c = 3 -class MI_D(MI_B, MI_C): - d = 4 - -def multiple_inheritance(): - i = MI_D() - return i.a + i.b + i.c + i.d - -def global_badinit(): - return 
global_bi.read() - -def flow_type_info(i): - if isinstance(i, int): - a = i + 1 - else: - a = len(str(i)) - return a - -def flow_usertype_info(ob): - if isinstance(ob, WithInit): - return ob - else: - return WithMoreInit(1, 2) - -def flow_identity_info(x=object, y=object): - if x is None: - if y is None: - return (x, y) - else: - return (x, None) - else: - if y is None: - return (None, y) - else: - return (None, None) - -def star_args(x, y, *args): - return x + args[0] - -def call_star_args(z): - return star_args(z, 5, 10, 15, 20) - -def default_args(x, y=2, z=3L): - return x+y+z - -def call_default_args(u): - return default_args(111, u) - -def default_and_star_args(x, y=2, z=3, *more): - return x+y+z+len(more) - -def call_default_and_star_args(u): - return (default_and_star_args(111, u), - default_and_star_args(-1000, -2000, -3000, -4000, -5000)) - -def call_with_star(z): - return default_args(-20, *z) - -def call_with_keyword(z): - return default_args(-20, z=z) - -def powerset(setsize=int): - """Powerset - - This one is from a Philippine Pythonista Hangout, an modified - version of Andy Sy's code. - - list.append is modified to list concatenation, and powerset - is pre-allocated and stored, instead of printed. 
- - URL is: http://lists.free.net.ph/pipermail/python/2002-November/ - """ - set = range(setsize) - maxcardinality = pow(2, setsize) - bitmask = 0L - powerset = [None] * maxcardinality - ptr = 0 - while bitmask < maxcardinality: - bitpos = 1L - index = 0 - subset = [] - while bitpos < maxcardinality: - if bitpos & bitmask: - subset = subset + [set[index]] - index += 1 - bitpos <<= 1 - powerset[ptr] = subset - ptr += 1 - bitmask += 1 - return powerset - -# --------------------(Currently) Non runnable Functions --------------------- - -def _somebug1(n=int): - l = [] - v = l.append - while n: - l[7] = 5 # raises an exception - break - return v - -def _inheritance_nonrunnable(): - d = D() - d.stuff = (-12, -12) - e = E() - e.stuff = (3, "world") - return C().stuff - -# --------------------(Currently) Non compillable Functions --------------------- - -def _attrs(): - def b(): pass - b.f = 4 - b.g = 5 - return b.f + b.g - -def _getstuff(x): - return x.stuff - -def _methodcall1(cond): - if cond: - x = G() - else: - x = H() - return x.m(42) - -def func1(): - pass - -def func2(): - pass - -def mergefunctions(cond): - if cond: - x = func1 - else: - x = func2 - return x - -def func_producing_exception(): - raise ValueError, "this might e.g. 
block the caller" - -def funccallsex(): - return func_producing_exception() - - -def func_arg_unpack(): - a,b = 3, "hello" - return a - -class APBC: - def __init__(self): - self.answer = 42 - -apbc = APBC() -apbc.answer = 7 - -def preserve_pbc_attr_on_instance(cond): - if cond: - x = APBC() - else: - x = apbc - return x.answer - - -def is_and_knowntype(x): - if x is None: - return x - else: - return None - -def isinstance_and_knowntype(x): - if isinstance(x, APBC): - return x - else: - return apbc diff --git a/pypy/translator/test/test_annrpython.py b/pypy/translator/test/test_annrpython.py deleted file mode 100644 index aea12652d7..0000000000 --- a/pypy/translator/test/test_annrpython.py +++ /dev/null @@ -1,395 +0,0 @@ - -import autopath -from pypy.tool.udir import udir - -from pypy.translator.annrpython import RPythonAnnotator, annmodel -from pypy.translator.translator import Translator -from pypy.objspace.flow.model import * - -from pypy.translator.test import snippet - -class TestAnnonateTestCase: - objspacename = 'flow' - - def make_fun(self, func): - import inspect - try: - func = func.im_func - except AttributeError: - pass - name = func.func_name - funcgraph = self.space.build_flow(func) - funcgraph.source = inspect.getsource(func) - return funcgraph - - def reallyshow(self, graph): - import os - from pypy.translator.tool.make_dot import make_dot - dest = make_dot('b', graph) - os.system('gv %s' % str(dest)) - - def test_simple_func(self): - """ - one test source: - def f(x): - return x+1 - """ - x = Variable("x") - result = Variable("result") - op = SpaceOperation("add", [x, Constant(1)], result) - block = Block([x]) - fun = FunctionGraph("f", block) - block.operations.append(op) - block.closeblock(Link([result], fun.returnblock)) - a = RPythonAnnotator() - a.build_types(fun, [int]) - assert a.gettype(fun.getreturnvar()) == int - - def test_while(self): - """ - one test source: - def f(i): - while i > 0: - i = i - 1 - return i - """ - i = Variable("i") - 
conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i, Constant(0)], conditionres) - decop = SpaceOperation("add", [i, Constant(-1)], i) - headerblock = Block([i]) - whileblock = Block([i]) - - fun = FunctionGraph("f", headerblock) - headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres - headerblock.closeblock(Link([i], fun.returnblock, False), - Link([i], whileblock, True)) - whileblock.operations.append(decop) - whileblock.closeblock(Link([i], headerblock)) - - a = RPythonAnnotator() - a.build_types(fun, [int]) - assert a.gettype(fun.getreturnvar()) == int - - def test_while_sum(self): - """ - one test source: - def f(i): - sum = 0 - while i > 0: - sum = sum + i - i = i - 1 - return sum - """ - i = Variable("i") - sum = Variable("sum") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i, Constant(0)], conditionres) - decop = SpaceOperation("add", [i, Constant(-1)], i) - addop = SpaceOperation("add", [i, sum], sum) - startblock = Block([i]) - headerblock = Block([i, sum]) - whileblock = Block([i, sum]) - - fun = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) - headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres - headerblock.closeblock(Link([sum], fun.returnblock, False), - Link([i, sum], whileblock, True)) - whileblock.operations.append(addop) - whileblock.operations.append(decop) - whileblock.closeblock(Link([i, sum], headerblock)) - - a = RPythonAnnotator() - a.build_types(fun, [int]) - assert a.gettype(fun.getreturnvar()) == int - - def test_f_calls_g(self): - a = RPythonAnnotator() - s = a.build_types(f_calls_g, [int]) - # result should be an integer - assert s.knowntype == int - - def test_lists(self): - fun = self.make_fun(snippet.poor_man_rev_range) - a = RPythonAnnotator() - a.build_types(fun, [int]) - # result should be a list of integers - assert a.gettype(fun.getreturnvar()) == list - 
end_cell = a.binding(fun.getreturnvar()) - assert end_cell.s_item.knowntype == int - - def test_factorial(self): - a = RPythonAnnotator() - s = a.build_types(snippet.factorial, [int]) - # result should be an integer - assert s.knowntype == int - - def test_factorial2(self): - a = RPythonAnnotator() - s = a.build_types(snippet.factorial2, [int]) - # result should be an integer - assert s.knowntype == int - - def test_build_instance(self): - a = RPythonAnnotator() - s = a.build_types(snippet.build_instance, []) - # result should be a snippet.C instance - assert s.knowntype == snippet.C - - def test_set_attr(self): - a = RPythonAnnotator() - s = a.build_types(snippet.set_attr, []) - # result should be an integer - assert s.knowntype == int - - def test_merge_setattr(self): - a = RPythonAnnotator() - s = a.build_types(snippet.merge_setattr, [int]) - # result should be an integer - assert s.knowntype == int - - def test_inheritance1(self): - a = RPythonAnnotator() - s = a.build_types(snippet.inheritance1, []) - # result should be exactly: - assert s == annmodel.SomeTuple([ - a.bookkeeper.immutablevalue(()), - annmodel.SomeInteger() - ]) - - def test_inheritance2(self): - a = RPythonAnnotator() - s = a.build_types(snippet._inheritance_nonrunnable, []) - # result should be exactly: - assert s == annmodel.SomeTuple([ - annmodel.SomeInteger(), - annmodel.SomeObject() - ]) - - def test_poor_man_range(self): - a = RPythonAnnotator() - s = a.build_types(snippet.poor_man_range, [int]) - # result should be a list of integers - assert s.knowntype == list - assert s.s_item.knowntype == int - - def test_methodcall1(self): - a = RPythonAnnotator() - s = a.build_types(snippet._methodcall1, [int]) - # result should be a tuple of (C, positive_int) - assert s.knowntype == tuple - assert len(s.items) == 2 - assert s.items[0].knowntype == snippet.C - assert s.items[1].knowntype == int - assert s.items[1].nonneg == True - - def test_classes_methodcall1(self): - a = RPythonAnnotator() - 
a.build_types(snippet._methodcall1, [int]) - # the user classes should have the following attributes: - classes = a.bookkeeper.userclasses - assert classes[snippet.F].attrs.keys() == ['m'] - assert classes[snippet.G].attrs.keys() == ['m2'] - assert classes[snippet.H].attrs.keys() == ['attr'] - assert classes[snippet.H].about_attribute('attr') == ( - a.bookkeeper.immutablevalue(1)) - - def DISABLED_test_knownkeysdict(self): - # disabled, SomeDict() is now a general {s_key: s_value} dict - a = RPythonAnnotator() - s = a.build_types(snippet.knownkeysdict, [int]) - # result should be an integer - assert s.knowntype == int - - def test_generaldict(self): - a = RPythonAnnotator() - s = a.build_types(snippet.generaldict, [str, int, str, int]) - # result should be an integer - assert s.knowntype == int - - def test_somebug1(self): - a = RPythonAnnotator() - s = a.build_types(snippet._somebug1, [int]) - # result should be a built-in method - assert isinstance(s, annmodel.SomeBuiltin) - - def test_with_init(self): - a = RPythonAnnotator() - s = a.build_types(snippet.with_init, [int]) - # result should be an integer - assert s.knowntype == int - - def test_with_more_init(self): - a = RPythonAnnotator() - s = a.build_types(snippet.with_more_init, [int, bool]) - # the user classes should have the following attributes: - classes = a.bookkeeper.userclasses - # XXX on which class should the attribute 'a' appear? We only - # ever flow WithInit.__init__ with a self which is an instance - # of WithMoreInit, so currently it appears on WithMoreInit. - assert classes[snippet.WithMoreInit].about_attribute('a') == ( - annmodel.SomeInteger()) - assert classes[snippet.WithMoreInit].about_attribute('b') == ( - annmodel.SomeBool()) - - def test_global_instance(self): - a = RPythonAnnotator() - s = a.build_types(snippet.global_instance, []) - # currently this returns the constant 42. - # XXX not sure this is the best behavior... 
- assert s == a.bookkeeper.immutablevalue(42) - - def test_call_five(self): - a = RPythonAnnotator() - s = a.build_types(snippet.call_five, []) - # returns should be a list of constants (= 5) - assert isinstance(s, annmodel.SomeList) - assert s.s_item == a.bookkeeper.immutablevalue(5) - - def test_call_five_six(self): - a = RPythonAnnotator() - s = a.build_types(snippet.call_five_six, []) - # returns should be a list of positive integers - assert isinstance(s, annmodel.SomeList) - assert s.s_item == annmodel.SomeInteger(nonneg=True) - - def test_constant_result(self): - a = RPythonAnnotator() - s = a.build_types(snippet.constant_result, []) - #a.translator.simplify() - # must return "yadda" - assert s == a.bookkeeper.immutablevalue("yadda") - keys = a.translator.flowgraphs.keys() - keys.sort() - expected = [snippet.constant_result, - snippet.forty_two, - # and not snippet.never_called - ] - expected.sort() - assert keys == expected - a.simplify() - #a.translator.view() - - def test_call_pbc(self): - a = RPythonAnnotator() - s = a.build_types(snippet.call_cpbc, []) - assert s == a.bookkeeper.immutablevalue(42) - - def test_flow_type_info(self): - a = RPythonAnnotator() - s = a.build_types(snippet.flow_type_info, [object]) - a.translator.simplify() - a.simplify() - #a.translator.view() - assert s.knowntype == int - - def test_flow_type_info_2(self): - a = RPythonAnnotator() - s = a.build_types(snippet.flow_type_info, - [annmodel.SomeInteger(nonneg=True)]) - # this checks that isinstance(i, int) didn't loose the - # actually more precise information that i is non-negative - assert s == annmodel.SomeInteger(nonneg=True) - - def test_flow_usertype_info(self): - a = RPythonAnnotator() - s = a.build_types(snippet.flow_usertype_info, [object]) - #a.translator.view() - assert s.knowntype == snippet.WithInit - - def test_flow_usertype_info2(self): - a = RPythonAnnotator() - s = a.build_types(snippet.flow_usertype_info, [snippet.WithMoreInit]) - #a.translator.view() - assert 
s.knowntype == snippet.WithMoreInit - - def test_flow_identity_info(self): - a = RPythonAnnotator() - s = a.build_types(snippet.flow_identity_info, [object, object]) - a.translator.simplify() - a.simplify() - #a.translator.view() - assert s == a.bookkeeper.immutablevalue((None, None)) - - def test_mergefunctions(self): - a = RPythonAnnotator() - s = a.build_types(snippet.mergefunctions, [int]) - # the test is mostly that the above line hasn't blown up - # but let's at least check *something* - assert isinstance(s, annmodel.SomePBC) - - def test_func_calls_func_which_just_raises(self): - a = RPythonAnnotator() - s = a.build_types(snippet.funccallsex, []) - # the test is mostly that the above line hasn't blown up - # but let's at least check *something* - #self.assert_(isinstance(s, SomeCallable)) - - def test_tuple_unpack_from_const_tuple_with_different_types(self): - a = RPythonAnnotator() - s = a.build_types(snippet.func_arg_unpack, []) - assert isinstance(s, annmodel.SomeInteger) - assert s.const == 3 - - def test_pbc_attr_preserved_on_instance(self): - a = RPythonAnnotator() - s = a.build_types(snippet.preserve_pbc_attr_on_instance, [bool]) - #a.simplify() - #a.translator.view() - assert s == annmodel.SomeInteger(nonneg=True) - #self.assertEquals(s.__class__, annmodel.SomeInteger) - - def test_is_and_knowntype_data(self): - a = RPythonAnnotator() - s = a.build_types(snippet.is_and_knowntype, [bool]) - #a.simplify() - #a.translator.view() - assert s == a.bookkeeper.immutablevalue(None) - - def test_isinstance_and_knowntype_data(self): - a = RPythonAnnotator() - x = annmodel.SomePBC({snippet.apbc: True}) - s = a.build_types(snippet.isinstance_and_knowntype, [x]) - #a.simplify() - #a.translator.view() - assert s == x - - def test_somepbc_simplify(self): - a = RPythonAnnotator() - # this example used to trigger an AssertionError - a.build_types(snippet.somepbc_simplify, []) - - def test_builtin_methods(self): - a = RPythonAnnotator() - iv = 
a.bookkeeper.immutablevalue - # this checks that some built-in methods are really supported by - # the annotator (it doesn't check that they operate property, though) - for example, methname, s_example in [ - ('', 'join', annmodel.SomeString()), - ([], 'append', annmodel.SomeList({})), - ([], 'reverse', annmodel.SomeList({})), - ([], 'insert', annmodel.SomeList({})), - ([], 'pop', annmodel.SomeList({})), - ]: - constmeth = getattr(example, methname) - s_constmeth = iv(constmeth) - assert isinstance(s_constmeth, annmodel.SomeBuiltin) - s_meth = s_example.getattr(iv(methname)) - assert isinstance(s_constmeth, annmodel.SomeBuiltin) - - -def g(n): - return [0,1,2,n] - -def f_calls_g(n): - total = 0 - lst = g(n) - i = 0 - while i < len(lst): - total += i - i += 1 - return total diff --git a/pypy/translator/test/test_cltrans.py b/pypy/translator/test/test_cltrans.py deleted file mode 100644 index 7a6a2adf9a..0000000000 --- a/pypy/translator/test/test_cltrans.py +++ /dev/null @@ -1,129 +0,0 @@ -import autopath -from pypy.tool.udir import udir -import py -import os - -def setup_module(mod): - mod.global_cl = os.getenv("PYPY_CL") - if not mod.global_cl: - mod.global_cl = cl_detect() - -def cl_detect(): - if is_on_path("clisp"): - return "clisp" - if is_on_path("lisp"): - if is_on_path("cmuclinvoke.sh"): - return "cmuclinvoke.sh" - if is_on_path("sbcl"): - if is_on_path("sbclinvoke.sh"): - return "sbclinvoke.sh" - return None - -def is_on_path(name): - try: - py.path.local.sysfind(name) - except py.error.ENOENT: - return False - else: - return True - -def make_cl_func(func, argtypes=[]): - from pypy.translator.tool.buildcl import _make_cl_func - return _make_cl_func(func, global_cl, udir, argtypes) - - -from pypy.translator.test import snippet as t -from pypy.translator.tool.buildcl import Literal - -class TestGenCLTestCase: - objspacename = 'flow' - - def setup_method(self,method): - if not global_cl: - py.test.skip("Common Lisp neither configured nor detected.") - - def 
test_if(self): - cl_if = make_cl_func(t.if_then_else) - assert cl_if(True, 50, 100) == 50 - assert cl_if(False, 50, 100) == 100 - assert cl_if(0, 50, 100) == 100 - assert cl_if(1, 50, 100) == 50 - assert cl_if([], 50, 100) == 100 - assert cl_if([[]], 50, 100) == 50 - - def test_gcd(self): - cl_gcd = make_cl_func(t.my_gcd, [int, int]) - assert cl_gcd(96, 64) == 32 - - def test_is_perfect(self): # pun intended - cl_perfect = make_cl_func(t.is_perfect_number, [int]) - assert cl_perfect(24) == False - assert cl_perfect(28) == True - - def test_bool(self): - cl_bool = make_cl_func(t.my_bool) - assert cl_bool(0) == False - assert cl_bool(42) == True - assert cl_bool(True) == True - - def test_array(self): - cl_four = make_cl_func(t.two_plus_two) - assert cl_four() == 4 - - def test_sieve(self): - cl_sieve = make_cl_func(t.sieve_of_eratosthenes) - assert cl_sieve() == 1028 - - def test_easy(self): - # These are the Pyrex tests which were easy to adopt. - f1 = make_cl_func(t.simple_func, [int]) - assert f1(1) == 2 - f2 = make_cl_func(t.while_func, [int]) - assert f2(10) == 55 - f3 = make_cl_func(t.simple_id) - assert f3(9) == 9 - f4 = make_cl_func(t.branch_id) - assert f4(1, 2, 3) == 2 - assert f4(0, 2, 3) == 3 - f5 = make_cl_func(t.int_id, [int]) - assert f5(3) == 3 - f6 = make_cl_func(t.time_waster, [int]) - assert f6(30) == 3657 - - def test_string(self): - cl_greet = make_cl_func(t.greet, [str]) - assert cl_greet("world") == "helloworld" - cl_stringmaker = make_cl_func(t.nested_whiles, [int, int]) - assert cl_stringmaker(111, 114) == ( - "...!...!...!...!...!") - - def test_for(self): - cl_python = make_cl_func(t.choose_last) - assert cl_python() == "python" - - def test_builtin(self): - cl_builtinusage = make_cl_func(t.builtinusage) - assert cl_builtinusage() == 4 - - def test_slice(self): - cl_half = make_cl_func(t.half_of_n, [int]) - assert cl_half(10) == 5 - - def test_powerset(self): - cl_powerset = make_cl_func(t.powerset, [int]) - result = cl_powerset(3) - 
assert result.__class__ == Literal - assert result.val == ( - '#(#() #(0) #(1) #(0 1) #(2) #(0 2) #(1 2) #(0 1 2))') - def test_yast(self): - cl_sum = make_cl_func(t.yast) # yet another sum test - assert cl_sum(range(12)) == 66 - - -# TODO -# poor_man_range -# - append/reverse. not RPython. delegate? -# attrs -# - attribute. need object. symbol-plist? -# yast -# - need way to specify that argument is list of int. diff --git a/pypy/translator/test/test_ctrans.py b/pypy/translator/test/test_ctrans.py deleted file mode 100644 index 9916aa659e..0000000000 --- a/pypy/translator/test/test_ctrans.py +++ /dev/null @@ -1,284 +0,0 @@ -import autopath -import py -from pypy.tool.udir import udir -from pypy.translator.genc import GenC -from pypy.objspace.flow.model import * -from pypy.translator.tool.buildpyxmodule import make_module_from_c -from pypy.translator.translator import Translator - -from pypy.translator.test import snippet - -# XXX this tries to make compiling faster for full-scale testing -from pypy.translator.tool import buildpyxmodule -buildpyxmodule.enable_fast_compilation() - -class TestNoTypeCGenTestCase: - objspacename = 'flow' - - def build_cfunc(self, func): - try: func = func.im_func - except AttributeError: pass - t = Translator(func) - t.simplify() - return py.test.skip_on_error(t.ccompile) - - def test_simple_func(self): - cfunc = self.build_cfunc(snippet.simple_func) - assert cfunc(1) == 2 - - def test_while_func(self): - while_func = self.build_cfunc(snippet.while_func) - assert while_func(10) == 55 - - def test_nested_whiles(self): - nested_whiles = self.build_cfunc(snippet.nested_whiles) - assert nested_whiles(111, 114) == ( - '...!...!...!...!...!') - - def test_poor_man_range(self): - poor_man_range = self.build_cfunc(snippet.poor_man_range) - assert poor_man_range(10) == range(10) - - def poor_man_rev_range(self): - poor_man_rev_range = self.build_cfunc(snippet.poor_man_rev_range) - assert poor_man_rev_range(10) == range(9,-1,-1) - - def 
test_simple_id(self): - #we just want to see, if renaming of parameter works correctly - #if the first branch is the end branch - simple_id = self.build_cfunc(snippet.simple_id) - assert simple_id(9) == 9 - - def test_branch_id(self): - branch_id = self.build_cfunc(snippet.branch_id) - assert branch_id(1, 2, 3) == 2 - assert branch_id(0, 2, 3) == 3 - - def test_int_id(self): - int_id = self.build_cfunc(snippet.int_id) - assert int_id(3) == 3 - - def dont_test_attrs(self): - attrs = self.build_cfunc(snippet.attrs) - assert attrs() == 9 - - def test_builtinusage(self): - fun = self.build_cfunc(snippet.builtinusage) - assert fun() == 4 - - def test_sieve(self): - sieve = self.build_cfunc(snippet.sieve_of_eratosthenes) - assert sieve() == 1028 - - def test_slice(self): - half = self.build_cfunc(snippet.half_of_n) - assert half(10) == 5 - - def test_poly_branch(self): - poly_branch = self.build_cfunc(snippet.poly_branch) - assert poly_branch(10) == [1,2,3]*2 - assert poly_branch(0) == ['a','b','c']*2 - - def test_and(self): - sand = self.build_cfunc(snippet.s_and) - assert sand(5, 6) == "yes" - assert sand(5, 0) == "no" - assert sand(0, 6) == "no" - assert sand(0, 0) == "no" - - def test_yast(self): - yast = self.build_cfunc(snippet.yast) - assert yast([1000,100,10,1]) == 1111 - assert yast(range(100)) == (99*100)/2 - - def test_with_init(self): - with_init = self.build_cfunc(snippet.with_init) - assert with_init(0) == 0 - assert with_init(-100) == -100 - - def test_with_more_init(self): - with_more_init = self.build_cfunc(snippet.with_more_init) - assert with_more_init(10, False) == -10 - assert with_more_init(20, True) == 20 - - def test_global_instance(self): - global_instance = self.build_cfunc(snippet.global_instance) - assert global_instance() == 42 - - def test_global_newstyle_instance(self): - global_newstyle_instance = self.build_cfunc(snippet.global_newstyle_instance) - assert global_newstyle_instance().a == 1 - - def test_global_recursive_list(self): - 
global_recursive_list = self.build_cfunc(snippet.global_recursive_list) - lst = global_recursive_list() - assert len(lst) == 1 - assert lst[0] is lst - -## def test_global_badinit(self): -## global_badinit = self.build_cfunc(snippet.global_badinit) -## self.assertEquals(global_badinit(), 1) - - def test_multiple_inheritance(self): - multiple_inheritance = self.build_cfunc(snippet.multiple_inheritance) - assert multiple_inheritance() == 1+2+3+4 - - def test_call_star_args(self): - call_star_args = self.build_cfunc(snippet.call_star_args) - assert call_star_args(42) == 52 - - def test_call_default_args(self): - call_default_args = self.build_cfunc(snippet.call_default_args) - assert call_default_args(42) == 111+42+3 - - def test_call_default_and_star_args(self): - call_default_and_star_args = self.build_cfunc( - snippet.call_default_and_star_args) - assert call_default_and_star_args(42) == ( - (111+42+3+0, -1000-2000-3000+2)) - - def test_call_with_star(self): - call_with_star = self.build_cfunc(snippet.call_with_star) - assert call_with_star(()) == -15L - assert call_with_star((4,)) == -13L - assert call_with_star((4,7)) == -9L - assert call_with_star([]) == -15L - assert call_with_star([4]) == -13L - assert call_with_star([4,7]) == -9L - raises(TypeError, call_with_star, (4,7,12)) - raises(TypeError, call_with_star, [4,7,12,63]) - raises(TypeError, call_with_star, 521) - - def test_call_with_keyword(self): - call_with_keyword = self.build_cfunc(snippet.call_with_keyword) - assert call_with_keyword(100) == 82 - - def test_finallys(self): - finallys = self.build_cfunc(snippet.finallys) - assert finallys(['hello']) == 8 - assert finallys('X') == 8 - assert finallys([]) == 6 - assert finallys('XY') == 6 - - def test_finally2(self): - finally2 = self.build_cfunc(snippet.finally2) - lst = range(10) - finally2(lst, 5) - assert lst == [0,1,2,3,4, 6, 6,7,8, 'done'] - dic = {} - raises(KeyError, finally2, dic, "won't find this key") - assert dic == {-1: 'done'} - - def 
test_bare_raise(self): - bare_raise = self.build_cfunc(snippet.bare_raise) - assert bare_raise(range(0, 100, 10), False) == 50 - assert bare_raise(range(0, 100, 10), True) == 50 - raises(IndexError, bare_raise, range(0, 30, 10), False) - assert bare_raise(range(0, 30, 10), True) == None - - def test_get_set_del_slice(self): - fn = self.build_cfunc(snippet.get_set_del_slice) - l = list('abcdefghij') - result = fn(l) - assert l == [3, 'c', 8, 11, 'h', 9] - assert result == ([3, 'c'], [9], [11, 'h']) - -class TestTypedTestCase: - - def getcompiled(self, func): - t = Translator(func, simplifying=True) - # builds starting-types from func_defs - argstypelist = [] - if func.func_defaults: - for spec in func.func_defaults: - if isinstance(spec, tuple): - spec = spec[0] # use the first type only for the tests - argstypelist.append(spec) - a = t.annotate(argstypelist) - a.simplify() - return py.test.skip_on_error(t.ccompile) - - def test_set_attr(self): - set_attr = self.getcompiled(snippet.set_attr) - assert set_attr() == 2 - - def test_inheritance2(self): - inheritance2 = self.getcompiled(snippet.inheritance2) - assert inheritance2() == ((-12, -12), (3, "world")) - - def test_factorial2(self): - factorial2 = self.getcompiled(snippet.factorial2) - assert factorial2(5) == 120 - - def test_factorial(self): - factorial = self.getcompiled(snippet.factorial) - assert factorial(5) == 120 - - def test_simple_method(self): - simple_method = self.getcompiled(snippet.simple_method) - assert simple_method(55) == 55 - - def test_sieve_of_eratosthenes(self): - sieve_of_eratosthenes = self.getcompiled(snippet.sieve_of_eratosthenes) - assert sieve_of_eratosthenes() == 1028 - - def test_nested_whiles(self): - nested_whiles = self.getcompiled(snippet.nested_whiles) - assert nested_whiles(5,3) == '!!!!!' 
- - def test_call_five(self): - call_five = self.getcompiled(snippet.call_five) - result = call_five() - assert result == [5] - # -- currently result isn't a real list, but a pseudo-array - # that can't be inspected from Python. - #self.assertEquals(result.__class__.__name__[:8], "list of ") - - def test_call_unpack_56(self): - call_unpack_56 = self.getcompiled(snippet.call_unpack_56) - result = call_unpack_56() - assert result == (2, 5, 6) - - def test_class_defaultattr(self): - class K: - n = "hello" - def class_defaultattr(): - k = K() - k.n += " world" - return k.n - fn = self.getcompiled(class_defaultattr) - assert fn() == "hello world" - - def test_tuple_repr(self): - def tuple_repr(x=int, y=object): - z = x, y - while x: - x = x-1 - return z - fn = self.getcompiled(tuple_repr) - assert fn(6,'a') == (6,'a') - - def test_classattribute(self): - fn = self.getcompiled(snippet.classattribute) - assert fn(1) == 123 - assert fn(2) == 456 - assert fn(3) == 789 - assert fn(4) == 789 - assert fn(5) == 101112 - - def test_get_set_del_slice(self): - fn = self.getcompiled(snippet.get_set_del_slice) - l = list('abcdefghij') - result = fn(l) - assert l == [3, 'c', 8, 11, 'h', 9] - assert result == ([3, 'c'], [9], [11, 'h']) - - def test_slice_long(self): - def slice_long(l=list, n=int): - return l[:n] - fn = self.getcompiled(slice_long) - l = list('abc') - result = fn(l, 2**32) - assert result == list('abc') - result = fn(l, 2**64) - assert result == list('abc') diff --git a/pypy/translator/test/test_pyrextrans.py b/pypy/translator/test/test_pyrextrans.py deleted file mode 100644 index 3cdb59be6b..0000000000 --- a/pypy/translator/test/test_pyrextrans.py +++ /dev/null @@ -1,139 +0,0 @@ -import autopath -import py -from pypy.tool.udir import udir -from pypy.translator.genpyrex import GenPyrex -from pypy.objspace.flow.model import * -from pypy.translator.tool.buildpyxmodule import build_cfunc -from pypy.translator.translator import Translator - -from pypy.translator.test 
import snippet - -# XXX this tries to make compiling faster for full-scale testing -from pypy.translator.tool import buildpyxmodule -buildpyxmodule.enable_fast_compilation() - - -class TestNoTypePyrexGenTestCase: - objspacename = 'flow' - - def build_cfunc(self, func): - try: func = func.im_func - except AttributeError: pass - - dot = py.test.config.option.verbose >0 and 1 or 0 - options = { - 'simplify' : 1, - 'dot' : dot, - } - return py.test.skip_on_error(build_cfunc, func, **options) - - def test_simple_func(self): - cfunc = self.build_cfunc(snippet.simple_func) - assert cfunc(1) == 2 - - def test_while_func(self): - while_func = self.build_cfunc(snippet.while_func) - assert while_func(10) == 55 - - def test_nested_whiles(self): - nested_whiles = self.build_cfunc(snippet.nested_whiles) - assert nested_whiles(111, 114) == ( - '...!...!...!...!...!') - - def test_poor_man_range(self): - poor_man_range = self.build_cfunc(snippet.poor_man_range) - assert poor_man_range(10) == range(10) - - def poor_man_rev_range(self): - poor_man_rev_range = self.build_cfunc(snippet.poor_man_rev_range) - assert poor_man_rev_range(10) == range(9,-1,-1) - - def test_simple_id(self): - #we just want to see, if renaming of parameter works correctly - #if the first branch is the end branch - simple_id = self.build_cfunc(snippet.simple_id) - assert simple_id(9) == 9 - - def test_branch_id(self): - branch_id = self.build_cfunc(snippet.branch_id) - assert branch_id(1, 2, 3) == 2 - assert branch_id(0, 2, 3) == 3 - - def test_int_id(self): - int_id = self.build_cfunc(snippet.int_id) - assert int_id(3) == 3 - - def dont_test_attrs(self): - attrs = self.build_cfunc(snippet.attrs) - assert attrs() == 9 - - def test_builtinusage(self): - fun = self.build_cfunc(snippet.builtinusage) - assert fun() == 4 - - def test_sieve(self): - sieve = self.build_cfunc(snippet.sieve_of_eratosthenes) - assert sieve() == 1028 - - def test_slice(self): - half = self.build_cfunc(snippet.half_of_n) - assert half(10) 
== 5 - - def test_poly_branch(self): - poly_branch = self.build_cfunc(snippet.poly_branch) - assert poly_branch(10) == [1,2,3]*2 - assert poly_branch(0) == ['a','b','c']*2 - - def test_and(self): - sand = self.build_cfunc(snippet.s_and) - assert sand(5, 6) == "yes" - assert sand(5, 0) == "no" - assert sand(0, 6) == "no" - assert sand(0, 0) == "no" - -class TestTypedTestCase: - - def getcompiled(self, func): - t = Translator(func) - t.simplify() - # builds starting-types from func_defs - argstypelist = [] - if func.func_defaults: - for spec in func.func_defaults: - if isinstance(spec, tuple): - spec = spec[0] # use the first type only for the tests - argstypelist.append(spec) - t.annotate(argstypelist) - return py.test.skip_on_error(t.compile) - - def test_set_attr(self): - set_attr = self.getcompiled(snippet.set_attr) - assert set_attr() == 2 - - def test_inheritance2(self): - inheritance2 = self.getcompiled(snippet.inheritance2) - assert inheritance2() == ((-12, -12), (3, "world")) - - def test_factorial2(self): - factorial2 = self.getcompiled(snippet.factorial2) - assert factorial2(5) == 120 - - def test_factorial(self): - factorial = self.getcompiled(snippet.factorial) - assert factorial(5) == 120 - - def test_simple_method(self): - simple_method = self.getcompiled(snippet.simple_method) - assert simple_method(55) == 55 - - def test_sieve_of_eratosthenes(self): - sieve_of_eratosthenes = self.getcompiled(snippet.sieve_of_eratosthenes) - assert sieve_of_eratosthenes() == 1028 - - def test_nested_whiles(self): - nested_whiles = self.getcompiled(snippet.nested_whiles) - assert nested_whiles(5,3) == '!!!!!' 
- - def test_call_five(self): - call_five = self.getcompiled(snippet.call_five) - assert call_five() == [5] diff --git a/pypy/translator/test/test_sourcegen.py b/pypy/translator/test/test_sourcegen.py deleted file mode 100644 index bbf6f43b07..0000000000 --- a/pypy/translator/test/test_sourcegen.py +++ /dev/null @@ -1,98 +0,0 @@ - -import autopath -import py -from pypy.tool.udir import udir - -from pypy.translator.genpyrex import GenPyrex -from pypy.objspace.flow.model import * - -from pypy.translator.tool.buildpyxmodule import make_module_from_pyxstring -#from pypy.translator.test.make_dot import make_ps - -# XXX this tries to make compiling faster for full-scale testing -from pypy.translator.tool import buildpyxmodule -buildpyxmodule.enable_fast_compilation() - - -class TestSourceGenTestCase: - def test_simple_func(self): - """ - one test source: - def f(x): - return x+1 - """ - x = Variable("x") - result = Variable("result") - op = SpaceOperation("add", [x, Constant(1)], result) - block = Block([x]) - fun = FunctionGraph("f", block) - block.operations.append(op) - block.closeblock(Link([result], fun.returnblock)) - result = GenPyrex(fun).emitcode() - mod = py.test.skip_on_error( - make_module_from_pyxstring, 'test_source1', udir, result) - assert mod.f(1) == 2 - - def test_if(self): - """ - one test source: - def f(i, j): - if i < 0: - i = j - return i - """ - i = Variable("i") - j = Variable("j") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("lt", [i, Constant(0)], conditionres) - startblock = Block([i, j]) - - fun = FunctionGraph("f", startblock) - startblock.operations.append(conditionop) - startblock.exitswitch = conditionres - startblock.closeblock(Link([i], fun.returnblock, False), - Link([j], fun.returnblock, True)) - - result = GenPyrex(fun).emitcode() - mod = py.test.skip_on_error( - make_module_from_pyxstring, 'test_source2', udir, result) - assert mod.f(-1, 42) == 42 - assert mod.f(3, 5) == 3 - - def test_while_sum(self): 
- """ - one test source: - def f(i): - sum = 0 - while i > 0: - sum = sum + i - i = i - 1 - return sum - """ - i = Variable("i") - sum = Variable("sum") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i, Constant(0)], conditionres) - decop = SpaceOperation("add", [i, Constant(-1)], i) - addop = SpaceOperation("add", [i, sum], sum) - startblock = Block([i]) - headerblock = Block([i, sum]) - whileblock = Block([i, sum]) - - fun = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) - headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres - headerblock.closeblock(Link([sum], fun.returnblock, False), - Link([i, sum], whileblock, True)) - whileblock.operations.append(addop) - whileblock.operations.append(decop) - whileblock.closeblock(Link([i, sum], headerblock)) - - result = GenPyrex(fun).emitcode() - mod = py.test.skip_on_error( - make_module_from_pyxstring, 'test_source4', udir, result) - assert mod.f(3) == 6 - assert mod.f(-3) == 0 diff --git a/pypy/translator/test/test_translator.py b/pypy/translator/test/test_translator.py deleted file mode 100644 index 15382f6c4e..0000000000 --- a/pypy/translator/test/test_translator.py +++ /dev/null @@ -1,4 +0,0 @@ -import autopath -from pypy.translator.translator import Translator - -from pypy.translator.test import snippet diff --git a/pypy/translator/tool/__init__.py b/pypy/translator/tool/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/translator/tool/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/translator/tool/autopath.py b/pypy/translator/tool/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/tool/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. 
-The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' 
in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/tool/benchmark.py b/pypy/translator/tool/benchmark.py deleted file mode 100644 index 449464d13c..0000000000 --- a/pypy/translator/tool/benchmark.py +++ /dev/null @@ -1,34 +0,0 @@ -import autopath -from pypy.tool import testit -from pypy.tool.udir import udir -from pypy.translator.tool.buildpyxmodule import build_cfunc -from pypy.translator.test.test_cltrans import global_cl, make_cl_func - -def benchmark(func): - try: - func = func.im_func - except AttributeError: - pass - c_func = build_cfunc(func, dot=False) - if global_cl: - cl_func = make_cl_func(func) - print "generated c-func for", func.func_name - t1 = timeit(100, func) - t2 = timeit(100, c_func) - if global_cl: - t3 = timeit(100, cl_func) - print "cpython func ", t1, "seconds" - print "pypy/pyrex/cmodule ", t2, "seconds" - if global_cl: - print "cl (experimental) ", t3, "seconds", global_cl - -def timeit(num, func, *args): - from time import time as now - start = now() - for i in 
xrange(num): - func(*args) - return now()-start - -if __name__ == '__main__': - from pypy.translator.test.snippet import sieve_of_eratosthenes - benchmark(sieve_of_eratosthenes) diff --git a/pypy/translator/tool/buildcl.py b/pypy/translator/tool/buildcl.py deleted file mode 100644 index 14c43cbecc..0000000000 --- a/pypy/translator/tool/buildcl.py +++ /dev/null @@ -1,68 +0,0 @@ -import autopath - -from pypy.objspace.flow import FlowObjSpace -from pypy.translator.gencl import GenCL -from py.process import cmdexec - -class Literal: - def __init__(self, val): - self.val = val - -def readlisp(s): - # Return bool/int/str or give up - import string - s = s.strip() - if s == "T": - return True - elif s == "NIL": - return False - elif s[0] == '"': - return s[1:-1] - elif s.strip(string.digits) == '': - return int(s) - else: - return Literal(s) - -def writelisp(gen, obj): - #if isinstance(obj, (bool, int, type(None), str)): - if isinstance(obj, (int, type(None), str)): - return gen.conv(obj) - if isinstance(obj, (tuple, list)): - content = ' '.join([writelisp(gen, elt) for elt in obj]) - content = '(' + content + ')' - if isinstance(obj, list): - content = '#' + content - elif isinstance(obj, tuple): - content = "'" + content # quote Lisp list - return content - -def _make_cl_func(func, cl, path, argtypes=[]): - fun = FlowObjSpace().build_flow(func) - gen = GenCL(fun, argtypes) - out = gen.globaldeclarations() + '\n' + gen.emitcode() - i = 1 - fpath = path.join("%s.lisp" % fun.name) - def _(*args): - fpath.write(out) - fp = file(str(fpath), "a") - print >>fp, "(write (", fun.name, - for arg in args: - print >>fp, writelisp(gen, arg), - print >>fp, "))" - fp.close() - output = cmdexec("%s %s" % (cl, str(fpath))) - return readlisp(output) - return _ - -if __name__ == '__main__': - # for test - # ultimately, GenCL's str and conv will move to here - def f(): pass - fun = FlowObjSpace().build_flow(f) - gen = GenCL(fun) - - what = [True, "universe", 42, None, ("of", "them", 
["eternal", 95])] - it = writelisp(gen, what) - print what - print it - assert it == '#(t "universe" 42 nil ("of" "them" #("eternal" 95)))' diff --git a/pypy/translator/tool/buildpyxmodule.py b/pypy/translator/tool/buildpyxmodule.py deleted file mode 100644 index d45c81fb7e..0000000000 --- a/pypy/translator/tool/buildpyxmodule.py +++ /dev/null @@ -1,187 +0,0 @@ -import autopath -from pypy.tool.udir import udir - -from py.process import cmdexec -from py import path -from pypy.translator.genpyrex import GenPyrex - -import os, sys, inspect, re -from pypy.translator.tool import stdoutcapture - -debug = 0 - -def make_module_from_pyxstring(name, dirpath, string): - dirpath = path.local(dirpath) - pyxfile = dirpath.join('%s.pyx' % name) - i = 0 - while pyxfile.check(): - pyxfile = pyxfile.new(basename='%s%d.pyx' % (name, i)) - i+=1 - pyxfile.write(string) - if debug: print "made pyxfile", pyxfile - cfile = make_c_from_pyxfile(pyxfile) - module = make_module_from_c(cfile) - #print "made module", module - return module - -def compiler_command(): - # e.g. 
for tcc, you might set this to - # "tcc -shared -o %s.so %s.c" - return os.getenv('PYPY_CC') - -def enable_fast_compilation(): - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt: - opt = re.sub('-O\d+', '-O0', opt) - else: - opt = '-O0' - gcv['OPT'] = opt - -def make_module_from_c(cfile, include_dirs=None): - #try: - # from distutils.log import set_threshold - # set_threshold(10000) - #except ImportError: - # print "ERROR IMPORTING" - # pass - if include_dirs is None: - include_dirs = [] - - dirpath = cfile.dirpath() - lastdir = path.local() - os.chdir(str(dirpath)) - try: - modname = cfile.purebasename - if debug: print "modname", modname - c = stdoutcapture.Capture(mixed_out_err = True) - try: - try: - if compiler_command(): - # GCC-ish options only - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - cmd = compiler_command().replace('%s', - str(dirpath.join(modname))) - for dir in [gcv['INCLUDEPY']] + list(include_dirs): - cmd += ' -I%s' % dir - cmdexec(cmd) - else: - from distutils.core import setup - from distutils.extension import Extension - saved_environ = os.environ.items() - try: - setup( - name = "testmodules", - ext_modules=[ - Extension(modname, [str(cfile)], - include_dirs=include_dirs) - ], - script_name = 'setup.py', - script_args = ['-q', 'build_ext', '--inplace'] - #script_args = ['build_ext', '--inplace'] - ) - finally: - for key, value in saved_environ: - if os.environ.get(key) != value: - os.environ[key] = value - finally: - foutput, foutput = c.done() - except: - data = foutput.read() - fdump = open("%s.errors" % modname, "w") - fdump.write(data) - fdump.close() - print data - raise - # XXX do we need to do some check on fout/ferr? 
- # XXX not a nice way to import a module - if debug: print "inserting path to sys.path", dirpath - sys.path.insert(0, '.') - if debug: print "import %(modname)s as testmodule" % locals() - exec "import %(modname)s as testmodule" % locals() - sys.path.pop(0) - finally: - os.chdir(str(lastdir)) - #if not debug: - #dirpath.rmtree() - return testmodule - -def make_c_from_pyxfile(pyxfile): - from Pyrex.Compiler.Main import CompilationOptions, Context, PyrexError - try: - options = CompilationOptions(show_version = 0, - use_listing_file = 0, - c_only = 1, - output_file = None) - context = Context(options.include_path) - result = context.compile(str(pyxfile), options) - if result.num_errors > 0: - raise ValueError, "failure %s" % result - except PyrexError, e: - print >>sys.stderr, e - cfile = pyxfile.new(ext='.c') - return cfile - -def build_cfunc(func, simplify=1, dot=1, inputargtypes=None): - """ return a pyrex-generated cfunction from the given func. - - simplify is true -> perform simplifications on the flowgraph. - dot is true -> generate a dot-configuration file and postscript. - inputargtypes is a list (allowed to be empty) -> - then annotation will be performed before generating - dot/pyrex/c code. 
- - """ - try: func = func.im_func - except AttributeError: pass - - # build the flow graph - from pypy.objspace.flow import Space - space = Space() - name = func.func_name - funcgraph = space.build_flow(func) - - if not inputargtypes: - source = inspect.getsource(func) - base = udir.join(name).new(ext='.py').write(source) - - if dot: - from pypy.translator.tool.make_dot import FlowGraphDotGen - dotgen = FlowGraphDotGen(name) - dotgen.emit_subgraph(name, funcgraph) - - # apply transformations - if simplify: - from pypy.translator.simplify import simplify_graph - simplify_graph(funcgraph) - name += '_s' - - # get the pyrex generator - genpyrex = GenPyrex(funcgraph) - - # generate pyrex (without type inference) - - # apply type inference - if inputargtypes is not None: - genpyrex.annotate(inputargtypes) - name += '_t' - #a = Annotator(self.functiongraph) - #a.build_types(input_arg_types) - #a.simplify() - - pyxstring = genpyrex.emitcode() - #funcgraph.source = inspect.getsource(func) - else: - pyxstring = genpyrex.emitcode() - - pyxheader = genpyrex.globaldeclarations() - mod = make_module_from_pyxstring(name, udir, pyxheader + '\n' + pyxstring) - - if dot: - if name != func.func_name: # if some transformations have been done - dotgen.emit_subgraph(name, funcgraph) - dotgen.generate() - - return getattr(mod, func.func_name) diff --git a/pypy/translator/tool/cmuclinvoke.sh b/pypy/translator/tool/cmuclinvoke.sh deleted file mode 100644 index 1f369594f0..0000000000 --- a/pypy/translator/tool/cmuclinvoke.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -# Copy this to your PATH -lisp -batch -eval "(compile-file \"$1\")(quit)" >/dev/null 2>&1 -lisp -batch -quiet -eval "(load (compile-file-pathname \"$1\"))(quit)" diff --git a/pypy/translator/tool/flowtrace.py b/pypy/translator/tool/flowtrace.py deleted file mode 100644 index 3cc319f538..0000000000 --- a/pypy/translator/tool/flowtrace.py +++ /dev/null @@ -1,394 +0,0 @@ -""" - -This is an experimental hack - use it at your 
peril! I (rxe) needed some sanity -check when (attempting) to write gencpp.py and this ended up being a useful -exercise. I doubt it much use to anyone else or the rest of the project, but it -might have some interesting possiblities later on once we have full translation. -:-) - -The idea is simple - take the basic blocks output via the flow object space and -evaluate them against the standard object space. It is effectively the same as -creating a C extension to the CPython interpreter to use the CPython API instead -of interpreting bytecodes - except in this case the we don't need to compile -anything and all space operations are emitted on the fly. One might call this a -flow interpreter - I wouldn't go that far! - -""" -import autopath -from pypy.objspace.flow import FlowObjSpace -from pypy.objspace.flow.model import traverse, Constant, Variable, Block, Link -from pypy.translator.simplify import simplify_graph -from pypy.interpreter.baseobjspace import OperationError -from pypy.interpreter.argument import Arguments - - -class FlowTracer(object): - - def __init__(self, flow_space, space, debug = False, trace = True): - self.space = space - self.flow_space = flow_space - self.trace = trace - self.debug = debug - - def pprint(self, v): - s = self.space.unwrap(self.space.repr(v.e_value)) - if len(s) > 30: - s = s[:27] + "..." - - if isinstance(v, Constant): - s = "Const(%s)" % s - - elif isinstance(v, Variable): - s = "%s(%s)" % (v, s) - - else: - assert False, "really?" - return s - - - def get_blocknames(self, graph): - blocknames = {} - def visit(n): - if isinstance(n, Block): - blocknames[n] = 'block%d' % len(blocknames) - - traverse(visit, graph) - return blocknames - - - def wrap_constants(self, graph): - all = [] - - def visit(n): - # Note - be careful with uniqueitems and constants, as there can - # multiple constant of the same value (XXX why?) 
- if isinstance(n, Block): - values = [] - values += n.inputargs - for op in n.operations: - values += op.args - - for ii in values: - all.append(ii) - - all.append(n.exitswitch) - - if isinstance(n, Link): - values = n.args - for ii in values: - all.append(ii) - - traverse(visit, graph) - for ii in all: - - if isinstance(ii, Constant) : - ii.e_value = self.space.wrap(ii.value) - - else: - assert (isinstance(ii, Variable) or ii is None) - - def wrap_linkexits(self, graph): - all = [] - - def visit(n): - if isinstance(n, Link): - all.append(n) - - traverse(visit, graph) - - for l in all: - l.exitcase = self.space.wrap(l.exitcase) - - - def execute_function(self, graph, *args_w): - - curblock = graph.startblock - assert len(curblock.inputargs) == len(args_w) - - # We add value as evaluated values during interpretation - # to variables, constants and exit switches. - # Variables are done on assignment (ie lazily) - for input, arg in zip(curblock.inputargs, args_w): - input.e_value = arg - - # Here we add value attribute with wrap - self.wrap_constants(graph) - self.wrap_linkexits(graph) - - blocknames = self.get_blocknames(graph) - last_exception = None - while True: - - if self.trace: - print 'Entering %s:' % blocknames[curblock] - print ' Input args :- %s' % (", ".join(map(self.pprint, curblock.inputargs))) - print ' Operations :-' - - for op in curblock.operations: - - # Why does op.args have a list in it? - opargs = [a.e_value for a in op.args] - if self.trace: - print ' %s = space.%s(%s)' % (op.result, - op.opname, - ", ".join(map(self.pprint, op.args))) - - if op.opname == "exception": - assert (len(opargs) == 1) - # XXX What we suppose to do with argument??? 
- if last_exception is not None: - res = last_exception - last_exception = None - else: - res = self.space.w_None - - elif op.opname == "call_args": - assert (len(opargs) >= 2) - shape = self.space.unwrap(opargs[1]) - args = Arguments.fromshape(shape, *opargs[2:]) - res = self.call_args(opargs[0], args) - - else: - # More special cases - spaceop = getattr(self.space, op.opname) - - if op.opname in ("newlist", "newdict", "newtuple"): - # These expect a list, not a *args - res = spaceop(opargs) - - else: - try: - res = spaceop(*opargs) - - # More special case - if op.opname == "is_true": - # Rewrap it! - res = self.space.wrap(res) - - except OperationError, exc: - last_exception = exc.w_type - res = self.space.w_None - - op.result.e_value = res - - if self.trace: - # Cases will likely not be a space object - if curblock.exits and curblock.exitswitch == op.result: - print ' %s := exit(%s)' % (op.result, op.result.e_value) - else: - print ' %s := %s' % (op.result, self.pprint(op.result)) - - # Switch to next block - if curblock.exits: - - # exits (safe code) - exit_link = None - if len(curblock.exits) == 1: - exit_link = curblock.exits[0] - - else: - exit_res = curblock.exitswitch.e_value - for link in curblock.exits: - if self.space.is_true(self.space.eq(exit_res, link.exitcase)): - exit_link = link - break - - assert exit_link is not None - - if self.trace: - print ' Exit to %s :- ' % blocknames[exit_link.target] - - sourceargs = exit_link.args - targetargs = exit_link.target.inputargs - assert len(sourceargs) == len(targetargs) - - for s, t in zip(sourceargs, targetargs): - if self.trace: - print " %s = %s" % (t, s) - - t.e_value = s.e_value - - curblock = exit_link.target - - if self.trace: - print - - elif len(curblock.inputargs) == 2: # exc_cls, exc_value - exc_cls, exc_value = curblock.inputargs - if self.trace: - print "Raising -", - print self.pprint(exc_cls), self.pprint(exc_value) - raise exc_cls.e_value, exc_value.e_value - - else: - result = 
curblock.inputargs[0] - if self.trace: - print "Returning -", self.pprint(result) - - return result.e_value - - - def call_args(self, w_func, args): - - func = self.space.unwrap(w_func) - if hasattr(func, "func_code"): - graph = self.flow_space.build_flow(func) - simplify_graph(graph) - if self.debug: - debug(func) - scope_w = args.parse(func.name, func.code.signature(), func.defs_w) - return self.execute_function(graph, *scope_w) - - else: - # XXX We could try creating the flow graph by runnning another - # flow objspace under self.space. Hmmm - if only I had - # bigger computer. - - # Instead we cheat (this is great fun when it is a fake type :-)) - if self.trace: - print "WOA! Cheating!", w_func - - return self.space.call_args(w_func, args) - - - def call(self, f, *args): - w = self.space.wrap - args_w = [w(ii) for ii in args] - w_func = w(f) - - res = self.call_args(w_func, Arguments(self.space, args_w)) - return self.space.unwrap(res) - - -def debug(func): - """Shows the control flow graph with annotations if computed. 
- Requires 'dot' and pygame.""" - from pypy.translator.tool.pygame.graphdisplay import GraphDisplay - from pypy.translator.tool.graphpage import FlowGraphPage - from pypy.translator.translator import Translator - t = Translator(func) - t.simplify() - #t.annotate([int]) - FlowGraphPage(t).display() - -def timeit(num, func, *args): - from time import time as now - start = now() - for i in xrange(num): - print func(*args) - return now() - start - -if __name__ == '__main__': - from pypy.objspace.std import Space - space = Space() - - def create_std_func(app_func): - - import new - from pypy.interpreter.gateway import app2interp - - # Horrible hack (ame needs to start with "app_") - app_func = new.function(app_func.func_code, - app_func.func_globals, - "app_" + app_func.__name__) - - # Create our function - func_gw = app2interp(app_func) - func = func_gw.get_function(space) - w_func = space.wrap(func) - - def f(*args): - args_w = [space.wrap(ii) for ii in args] - args_ = Arguments(space, args_w) - w_result = space.call_args(w_func, args_) - return space.unwrap(w_result) - return f - - def create_flow_func(f): - flow_space = FlowObjSpace() - interpreter = FlowTracer(flow_space, space) - def func(*args): - return interpreter.call(f, *args) - return func - - def do(f, *args): - print "doing %s(%s)" % (f.__name__, ", ".join(map(str, args))) - f_flow = create_flow_func(f) - res = f_flow(*args) - f_norm = create_std_func(f) - res_norm = f_norm(*args) - assert res == res_norm - return res - - def do_flow_only(f, *args): - print "doing %s(%s)" % (f.__name__, ", ".join(map(str, args))) - f_flow = create_flow_func(f) - res = f_flow(*args) - return res - - #///////////////////////////////////////////////////////////////////////////// - - def tests(): - from pypy.translator.test import snippet - - tests = [ - (snippet.if_then_else, 1, 2, 3), - (snippet.if_then_else, 0, 2, 3), - (snippet.my_gcd, 256, 192), - (snippet.is_perfect_number, 81), - (snippet.my_bool, 1), - 
(snippet.my_bool, 0), - (snippet.two_plus_two,), - #(snippet.sieve_of_eratosthenes,), - (snippet.simple_func, 10), - (snippet.nested_whiles, 1, 10), - (snippet.simple_func, 10), - (snippet.builtinusage,), - (snippet.poor_man_range, 10), - (snippet.poor_man_rev_range, 10), - (snippet.simple_id, 2) , - (snippet.branch_id, 1, "k", 1.0) , - (snippet.branch_id, False, "k", 1.0) , - (snippet.builtinusage,), - (snippet.yast, [1,2,3,4,5]), - (snippet.time_waster, 5), - (snippet.half_of_n, 20), - (snippet.int_id, 20), - (snippet.greet, "world"), - (snippet.choose_last,), - #(snippet.choose_last,), XXX Why does repeating this break? - (snippet.poly_branch, 1), - (snippet.s_and, 1, 1), - (snippet.s_and, 0, 1), - (snippet.s_and, 1, 0), - (snippet.s_and, 0, 0), - (snippet.break_continue, 15), - (snippet.reverse_3, ("k", 1, 1.0)), - (snippet.finallys, ("k", 1, 1.0)), - (snippet.finallys, ("k",)), - (snippet.finallys, []), - (snippet._append_five, []), - (snippet._append_five, [1,2,3]), - ] - for ii in tests: - print do(*ii) - - tests = [ - (snippet.factorial, 4), - (snippet.factorial2, 4), - (snippet.call_five,), - (snippet.build_instance,), - (snippet.set_attr,), - (snippet.merge_setattr, 0), - (snippet.merge_setattr, 1), - # XXX These don't work from test.snippet (haven't tried anymore) - #(snippet.inheritance1,), - #(snippet.inheritance2,), - - ] - - for ii in tests: - print do_flow_only(*ii) - - tests() - diff --git a/pypy/translator/tool/graphpage.py b/pypy/translator/tool/graphpage.py deleted file mode 100644 index c0b6425b36..0000000000 --- a/pypy/translator/tool/graphpage.py +++ /dev/null @@ -1,275 +0,0 @@ -import inspect -from pypy.objspace.flow.model import traverse -from pypy.translator.tool.make_dot import DotGen, make_dot, make_dot_graphs -from pypy.interpreter.pycode import CO_VARARGS, CO_VARKEYWORDS -from pypy.annotation import model -from pypy.annotation.classdef import ClassDef -from pypy.tool.uid import uid - - -class GraphPage: - """Base class for the 
server-side content of one of the 'pages' - (one graph) sent over to and displayed by the client. - """ - def __init__(self, *args): - self.args = args - - def content(self): - """Compute the content of the page. - This doesn't modify the page in place; it returns a new GraphPage. - """ - if hasattr(self, 'source'): - return self - else: - new = self.__class__() - new.source = '' # '''dot source''' - new.links = {} # {'word': 'statusbar text'} - new.compute(*self.args) # defined in subclasses - return new - - def followlink(self, word): - raise KeyError - - def display(self): - "Display a graph page locally." - from pypy.translator.tool.pygame.graphclient import get_layout - get_layout(self).display() - - -class SingleGraphPage(GraphPage): - """ A GraphPage showing a single precomputed FlowGraph.""" - - def compute(self, graph): - self.source = make_dot(graph.name, graph, target=None) - - -class VariableHistoryGraphPage(GraphPage): - """ A GraphPage showing the history of variable bindings. 
""" - - def compute(self, translator, name, info, caused_by, history, func_names): - self.linkinfo = {} - self.translator = translator - self.func_names = func_names - dotgen = DotGen('binding') - label = "Most recent binding of %s\\n\\n%s" % (name, nottoowide(info)) - if info.origin is not None: - label += "\\n" + self.createlink(info.origin, 'Originated at') - if caused_by is not None: - label += '\\n' + self.createlink(caused_by) - if info.caused_by_merge is not None: - data = 'unionof%r' % (info.caused_by_merge,) - label += '\\n%s' % nottoowide(data) - - dotgen.emit_node('0', shape="box", color="red", label=label) - for n, (data, caused_by) in zip(range(len(history)), history): - label = nottoowide(data) - if data.origin is not None: - label += "\\n" + self.createlink(data.origin, 'Originated at') - if caused_by is not None: - label += '\\n' + self.createlink(caused_by) - dotgen.emit_node(str(n+1), shape="box", label=label) - dotgen.emit_edge(str(n+1), str(n)) - self.source = dotgen.generate(target=None) - - def createlink(self, position_key, wording='Caused by a call from'): - fn, block, pos = position_key - basename = self.func_names.get(fn, fn.func_name) - linkname = basename - n = 1 - while self.linkinfo.get(linkname, position_key) != position_key: - n += 1 - linkname = '%s_%d' % (basename, n) - self.linkinfo[linkname] = position_key - # It would be nice to get the block name somehow - blockname = block.__class__.__name__ - self.links[linkname] = '%s, %s, position %r:\n%s' % (basename, - blockname, pos, block.operations[pos]) - return '%s %s' % (wording, linkname) - - def followlink(self, funcname): - fn, block, pos = self.linkinfo[funcname] - # It would be nice to focus on the block - return FlowGraphPage(self.translator, [fn], self.func_names) - - -class FlowGraphPage(GraphPage): - """ A GraphPage showing a Flow Graph (or a few flow graphs). 
- """ - def compute(self, translator, functions=None, func_names=None): - self.translator = translator - self.annotator = translator.annotator - self.func_names = func_names or {} - functions = functions or translator.functions - graphs = [translator.getflowgraph(func) for func in functions] - gs = [(graph.name, graph) for graph in graphs] - if self.annotator and self.annotator.blocked_functions: - for block, was_annotated in self.annotator.annotated.items(): - if not was_annotated: - block.fillcolor = "red" - self.source = make_dot_graphs(graphs[0].name+"_graph", gs, target=None) - # make the dictionary of links -- one per annotated variable - self.binding_history = {} - self.current_value = {} - self.caused_by = {} - if self.annotator: - for var in self.annotator.bindings: - s_value = self.annotator.binding(var) - info = '%s: %s' % (var.name, s_value) - self.links[var.name] = info - self.current_value[var.name] = s_value - self.caused_by[var.name] = ( - self.annotator.binding_caused_by.get(var)) - for var, history in self.annotator.bindingshistory.items(): - cause_history = ( - self.annotator.binding_cause_history.get(var, [])) - self.binding_history[var.name] = zip(history, cause_history) - - def followlink(self, varname): - # clicking on a variable name shows its binding history - cur_value = self.current_value[varname] - caused_by = self.caused_by[varname] - history = list(self.binding_history.get(varname, [])) - history.reverse() - return VariableHistoryGraphPage(self.translator, varname, cur_value, - caused_by, history, self.func_names) - - -def nottoowide(text, width=72): - parts = str(text).split(' ') - lines = [] - line = parts.pop(0) - for s in parts: - if len(line)+len(s) < width: - line = line + ' ' + s - else: - lines.append(line) - line = s - lines.append(line) - return '\\n'.join(lines) - - -class ClassDefPage(GraphPage): - """A GraphPage showing the attributes of a class. 
- """ - def compute(self, translator, cdef): - self.translator = translator - dotgen = DotGen(cdef.cls.__name__, rankdir="LR") - - def writecdef(cdef): - dotgen.emit_node(nameof(cdef), color="red", shape="octagon", - label=repr(cdef.cls)) - attrs = cdef.attrs.items() - attrs.sort() - for name, attrdef in attrs: - s_value = attrdef.s_value - dotgen.emit_node(name, shape="box", label=nottoowide(s_value)) - dotgen.emit_edge(nameof(cdef), name, label=name) - - prevcdef = None - while cdef is not None: - writecdef(cdef) - if prevcdef: - dotgen.emit_edge(nameof(cdef), nameof(prevcdef), color="red") - prevcdef = cdef - cdef = cdef.basedef - - self.source = dotgen.generate(target=None) - - -class TranslatorPage(GraphPage): - """A GraphPage showing a the call graph between functions - as well as the class hierarchy.""" - - def compute(self, translator): - self.translator = translator - self.object_by_name = {} - self.name_by_object = {} - dotgen = DotGen('translator') - dotgen.emit('mclimit=15.0') - - # show the call graph - functions = translator.functions - blocked_functions = {} - if translator.annotator: - # don't use translator.annotator.blocked_functions here because - # it is not populated until the annotator finishes. 
- annotated = translator.annotator.annotated - for fn, graph in translator.flowgraphs.items(): - def visit(node): - if annotated.get(node) is False: - blocked_functions[fn] = True - traverse(visit, graph) - highlight_functions = getattr(translator, 'highlight_functions', {}) # XXX - dotgen.emit_node('entry', fillcolor="green", shape="octagon", - label="Translator\\nEntry Point") - for func in functions: - name = func.func_name - class_ = getattr(func, 'class_', None) - if class_ is not None: - name = '%s.%s' % (class_.__name__, name) - data = self.labelof(func, name) - if func in blocked_functions: - kw = {'fillcolor': 'red'} - elif func in highlight_functions: - kw = {'fillcolor': '#ffcccc'} - else: - kw = {} - dotgen.emit_node(nameof(func), label=data, shape="box", **kw) - dotgen.emit_edge('entry', nameof(functions[0]), color="green") - for f1, f2 in translator.callgraph.itervalues(): - dotgen.emit_edge(nameof(f1), nameof(f2)) - - # show the class hierarchy - if self.translator.annotator: - dotgen.emit_node(nameof(None), color="red", shape="octagon", - label="Root Class\\nobject") - for classdef in self.translator.annotator.getuserclassdefinitions(): - data = self.labelof(classdef, classdef.cls.__name__) - dotgen.emit_node(nameof(classdef), label=data, shape="box") - dotgen.emit_edge(nameof(classdef.basedef), nameof(classdef)) - - self.source = dotgen.generate(target=None) - - # link the function names to the individual flow graphs - for name, obj in self.object_by_name.iteritems(): - if isinstance(obj, ClassDef): - #data = '%s.%s' % (obj.cls.__module__, obj.cls.__name__) - data = repr(obj.cls) - else: - func = obj - try: - source = inspect.getsource(func) - except IOError: # e.g. 
when func is defined interactively - source = func.func_name - data = '%s:%d\n%s' % (func.func_globals.get('__name__', '?'), - func.func_code.co_firstlineno, - source.split('\n')[0]) - self.links[name] = data - - def labelof(self, obj, objname): - name = objname - i = 1 - while name in self.object_by_name: - i += 1 - name = '%s__%d' % (objname, i) - self.object_by_name[name] = obj - self.name_by_object[obj] = name - return name - - def followlink(self, name): - obj = self.object_by_name[name] - if isinstance(obj, ClassDef): - return ClassDefPage(self.translator, obj) - else: - return FlowGraphPage(self.translator, [obj], self.name_by_object) - - -def nameof(obj, cache={}): - # NB. the purpose of the cache is not performance, but to ensure that - # two objects that compare equal get the same name - try: - return cache[obj] - except KeyError: - result = '%s__0x%x' % (getattr(obj, '__name__', ''), uid(obj)) - cache[obj] = result - return result diff --git a/pypy/translator/tool/graphserver.py b/pypy/translator/tool/graphserver.py deleted file mode 100644 index 3fb8d35a63..0000000000 --- a/pypy/translator/tool/graphserver.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -A socket server for GraphPages. -""" - -import autopath, sys, thread, struct, marshal - - -def run_server(homepage, port=8888, quiet=False, background=False): - import socket - server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server_sock.bind(('', port)) - server_sock.listen(5) - if not quiet: - print >> sys.stderr, 'Accepting connexions on port %d...' % port - def accept_connexions(): - while True: - conn, addr = server_sock.accept() - if not quiet: - print >> sys.stderr, 'Connected by %r.' 
% (addr,) - thread.start_new_thread(serve_connexion, (conn, homepage)) - if background: - thread.start_new_thread(accept_connexions, ()) - else: - accept_connexions() - - -def recv_all(s, count): - buf = '' - while len(buf) < count: - data = s.recv(count - len(buf)) - if not data: - raise SystemExit # thread exit, rather - buf += data - return buf - -def recv_msg(s): - hdr_size = struct.calcsize("!i") - msg_size, = struct.unpack("!i", recv_all(s, hdr_size)) - msg = recv_all(s, msg_size) - return marshal.loads(msg) - -def send_msg(s, msg): - data = marshal.dumps(msg) - s.sendall(struct.pack("!i", len(data)) + data) - - -def serve_connexion(s, homepage): - pages = {0: homepage} - while True: - key, link = recv_msg(s) - page = pages[key] - if link is not None: - try: - page = page.content().followlink(link) - key = len(pages) - except KeyError: - page = MissingPage() - key = -1 - pages[key] = page - page = page.content() - reply = { - 'key': key, - 'dot': page.source, - 'links': page.links, - } - send_msg(s, reply) - - -class MissingPage: - links = {} - source = ''' -digraph error { -msg [shape="box", label="Error: a link has gone missing.", color="black", fillcolor="red", style="filled"]; -} -''' - def content(self): - return self diff --git a/pypy/translator/tool/make_dot.py b/pypy/translator/tool/make_dot.py deleted file mode 100644 index b49a72bb9e..0000000000 --- a/pypy/translator/tool/make_dot.py +++ /dev/null @@ -1,205 +0,0 @@ -from __future__ import generators - -""" -""" - -import autopath, os -import inspect, linecache -from pypy.objspace.flow.model import * -from pypy.objspace.flow import Space -from pypy.tool.udir import udir -from py.process import cmdexec -from pypy.interpreter.pytraceback import offset2lineno - -class DotGen: - - def __init__(self, graphname, rankdir=None): - self.graphname = graphname - self.lines = [] - self.source = None - self.emit("digraph %s {" % graphname) - if rankdir: - self.emit('rankdir="%s"' % rankdir) - - def generate(self, 
storedir=None, target='ps'): - source = self.get_source() - if target is None: - return source # unprocessed - if storedir is None: - storedir = udir - pdot = storedir.join('%s.dot' % self.graphname) - pdot.write(source) - ptarget = pdot.new(ext=target) - cmdexec('dot -T%s %s>%s' % (target, str(pdot),str(ptarget))) - return ptarget - - def get_source(self): - if self.source is None: - self.emit("}") - self.source = '\n'.join(self.lines) - del self.lines - return self.source - - def emit(self, line): - self.lines.append(line) - - def enter_subgraph(self, name): - self.emit("subgraph %s {" % (name,)) - - def leave_subgraph(self): - self.emit("}") - - def emit_edge(self, name1, name2, label="", - style="dashed", - color="black", - dir="forward", - weight="5", - ): - d = locals() - attrs = [('%s="%s"' % (x, d[x].replace('"', '\\"'))) - for x in ['label', 'style', 'color', 'dir', 'weight']] - self.emit('edge [%s];' % ", ".join(attrs)) - self.emit('%s -> %s' % (name1, name2)) - - def emit_node(self, name, - shape="diamond", - label="", - color="black", - fillcolor="white", - style="filled", - ): - d = locals() - attrs = [('%s="%s"' % (x, d[x].replace('"', '\\"'))) - for x in ['shape', 'label', 'color', 'fillcolor', 'style']] - self.emit('%s [%s];' % (name, ", ".join(attrs))) - - -class FlowGraphDotGen(DotGen): - - def __init__(self, graphname, rankdir=None): - DotGen.__init__(self, graphname.replace('.', '_'), rankdir) - - def emit_subgraph(self, name, node): - name = name.replace('.', '_') - self.blocks = {} - self.func = None - self.prefix = name - self.enter_subgraph(name) - traverse(self, node) - self.leave_subgraph() - - def blockname(self, block): - i = id(block) - try: - return self.blocks[i] - except KeyError: - self.blocks[i] = name = "%s_%d" % (self.prefix, len(self.blocks)) - return name - - def visit(self, obj): - # ignore for now - return - - def visit_FunctionGraph(self, funcgraph): - name = self.prefix # +'_'+funcgraph.name - data = funcgraph.name - if 
hasattr(funcgraph, 'source'): - source = funcgraph.source - data += "\\n" + "\\l".join(source.split('\n')) - if hasattr(funcgraph, 'func'): - self.func = funcgraph.func - - self.emit_node(name, label=data, shape="box", fillcolor="green", style="filled") - #('%(name)s [fillcolor="green", shape=box, label="%(data)s"];' % locals()) - self.emit_edge(name, self.blockname(funcgraph.startblock), 'startblock') - #self.emit_edge(name, self.blockname(funcgraph.returnblock), 'returnblock', style="dashed") - - def visit_Block(self, block): - # do the block itself - name = self.blockname(block) - lines = map(repr, block.operations) - lines.append("") - numblocks = len(block.exits) - color = "black" - fillcolor = getattr(block, "fillcolor", "white") - if not numblocks: - shape = "box" - fillcolor="green" - if len(block.inputargs) == 1: - lines[-1] += 'return %s' % tuple(block.inputargs) - elif len(block.inputargs) == 2: - lines[-1] += 'raise %s, %s' % tuple(block.inputargs) - elif numblocks == 1: - shape = "box" - else: - color = "red" - lines.append("exitswitch: %s" % block.exitswitch) - shape = "octagon" - - iargs = " ".join(map(repr, block.inputargs)) - data = "%s(%s)\\ninputargs: %s\\n\\n" % (name, block.__class__.__name__, iargs) - if block.operations and self.func: - maxoffs = max([op.offset for op in block.operations]) - if maxoffs >= 0: - minoffs = min([op.offset for op in block.operations - if op.offset >= 0]) - minlineno = offset2lineno(self.func.func_code, minoffs) - maxlineno = offset2lineno(self.func.func_code, maxoffs) - filename = inspect.getsourcefile(self.func) - source = "\l".join([linecache.getline(filename, line).rstrip() - for line in range(minlineno, maxlineno+1)]) - if minlineno == maxlineno: - data = data + r"line %d:\n%s\l\n" % (minlineno, source) - else: - data = data + r"lines %d-%d:\n%s\l\n" % (minlineno, - maxlineno, source) - - data = data + "\l".join(lines) - - self.emit_node(name, label=data, shape=shape, color=color, style="filled", 
fillcolor=fillcolor) - - # do links/exits - if numblocks == 1: - name2 = self.blockname(block.exits[0].target) - label = " ".join(map(repr, block.exits[0].args)) - self.emit_edge(name, name2, label, style="solid") - elif numblocks >1: - for link in block.exits: - name2 = self.blockname(link.target) - label = " ".join(map(repr, link.args)) - label = "%s: %s" %(link.exitcase, label) - self.emit_edge(name, name2, label, style="dotted", color=color) - - -def make_dot(graphname, graph, storedir=None, target='ps'): - return make_dot_graphs(graph.name, [(graphname, graph)], storedir, target) - -def show_dot(graph, storedir = None, target = 'ps'): - name = graph.name - fn = make_dot(name, graph, storedir, target) - os.system('gv %s' % fn) - -def make_dot_graphs(basefilename, graphs, storedir=None, target='ps'): - dotgen = FlowGraphDotGen(basefilename) - names = {basefilename: True} - for graphname, graph in graphs: - if graphname in names: - i = 2 - while graphname + str(i) in names: - i += 1 - graphname = graphname + str(i) - names[graphname] = True - dotgen.emit_subgraph(graphname, graph) - return dotgen.generate(storedir, target) - - -if __name__ == '__main__': - def f(x): - i = 0 - while i < x: - i += 1 - return i - - space = Space() - graph = space.build_flow(f) - make_dot('f', graph) diff --git a/pypy/translator/tool/pygame/VeraMoBd.ttf b/pypy/translator/tool/pygame/VeraMoBd.ttf Binary files differdeleted file mode 100644 index 9be6547ed6..0000000000 --- a/pypy/translator/tool/pygame/VeraMoBd.ttf +++ /dev/null diff --git a/pypy/translator/tool/pygame/__init__.py b/pypy/translator/tool/pygame/__init__.py deleted file mode 100644 index 1bb8bf6d7f..0000000000 --- a/pypy/translator/tool/pygame/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/pypy/translator/tool/pygame/autopath.py b/pypy/translator/tool/pygame/autopath.py deleted file mode 100644 index 8308fed460..0000000000 --- a/pypy/translator/tool/pygame/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" 
-self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - checkpaths = sys.path[:] - pypy_root = os.path.join(head, '') - - while checkpaths: - orig = checkpaths.pop() - if os.path.join(os.path.realpath(orig), '').startswith(pypy_root): - sys.path.remove(orig) - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - fn = getattr(mod, '__file__', None) - if '.' 
in name or not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/pypy/translator/tool/pygame/cyrvetic.ttf b/pypy/translator/tool/pygame/cyrvetic.ttf Binary files differdeleted file mode 100755 index 2d20d5f082..0000000000 --- a/pypy/translator/tool/pygame/cyrvetic.ttf +++ /dev/null diff --git a/pypy/translator/tool/pygame/drawgraph.py b/pypy/translator/tool/pygame/drawgraph.py deleted file mode 100644 index 3e9e7b2504..0000000000 --- 
a/pypy/translator/tool/pygame/drawgraph.py +++ /dev/null @@ -1,562 +0,0 @@ -""" -A custom graphic renderer for the '.plain' files produced by dot. - -""" - -from __future__ import generators -import autopath -import re, os, math -import pygame -from pygame.locals import * - - -FONT = os.path.join(autopath.this_dir, 'cyrvetic.ttf') -COLOR = { - 'black': (0,0,0), - 'white': (255,255,255), - 'red': (255,0,0), - 'green': (0,255,0), - } -re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') - -def combine(color1, color2, alpha): - r1, g1, b1 = color1 - r2, g2, b2 = color2 - beta = 1.0 - alpha - return (int(r1 * alpha + r2 * beta), - int(g1 * alpha + g2 * beta), - int(b1 * alpha + b2 * beta)) - - -def highlight_color(color): - if color == (0, 0, 0): # black becomes magenta - return (255, 0, 255) - elif color == (255, 255, 255): # white becomes yellow - return (255, 255, 0) - intensity = sum(color) - if intensity > 191 * 3: - return combine(color, (128, 192, 0), 0.2) - else: - return combine(color, (255, 255, 0), 0.2) - -def getcolor(name, default): - if name in COLOR: - return COLOR[name] - elif name.startswith('#') and len(name) == 7: - rval = COLOR[name] = (int(name[1:3],16), int(name[3:5],16), int(name[5:7],16)) - return rval - else: - return default - - -class GraphLayout: - - def __init__(self, filename): - # parse the layout file (.plain format) - lines = open(str(filename), 'r').readlines() - for i in range(len(lines)-2, -1, -1): - if lines[i].endswith('\\\n'): # line ending in '\' - lines[i] = lines[i][:-2] + lines[i+1] - del lines[i+1] - header = splitline(lines.pop(0)) - assert header[0] == 'graph' - self.scale = float(header[1]) - self.boundingbox = float(header[2]), float(header[3]) - self.nodes = {} - self.edges = [] - for line in lines: - line = splitline(line) - if line[0] == 'node': - n = Node(*line[1:]) - self.nodes[n.name] = n - if line[0] == 'edge': - self.edges.append(Edge(self.nodes, *line[1:])) - if line[0] == 'stop': - break - self.links = {} - - def 
display(self): - from pypy.translator.tool.pygame.graphdisplay import GraphDisplay - GraphDisplay(self).run() - - def reload(self): - return self - -class Node: - def __init__(self, name, x, y, w, h, label, style, shape, color, fillcolor): - self.name = name - self.x = float(x) - self.y = float(y) - self.w = float(w) - self.h = float(h) - self.label = label - self.style = style - self.shape = shape - self.color = color - self.fillcolor = fillcolor - self.highlight = False - - def sethighlight(self, which): - self.highlight = bool(which) - -class Edge: - label = None - - def __init__(self, nodes, tail, head, cnt, *rest): - self.tail = nodes[tail] - self.head = nodes[head] - cnt = int(cnt) - self.points = [(float(rest[i]), float(rest[i+1])) - for i in range(0, cnt*2, 2)] - rest = rest[cnt*2:] - if len(rest) > 2: - self.label, xl, yl = rest[:3] - self.xl = float(xl) - self.yl = float(yl) - rest = rest[3:] - self.style, self.color = rest - self.highlight = False - - def sethighlight(self, which): - self.highlight = bool(which) - - def bezierpoints(self, resolution=8): - result = [] - pts = self.points - for i in range(0, len(pts)-3, 3): - result += beziercurve(pts[i], pts[i+1], - pts[i+2], pts[i+3], resolution) - return result - - def arrowhead(self): - bottom_up = self.points[0][1] > self.points[-1][1] - if (self.tail.y > self.head.y) != bottom_up: # reversed edge - head = 0 - dir = 1 - else: - head = -1 - dir = -1 - n = 1 - while True: - try: - x0, y0 = self.points[head] - x1, y1 = self.points[head+n*dir] - except IndexError: - return [] - vx = x0-x1 - vy = y0-y1 - try: - f = 0.12 / math.sqrt(vx*vx + vy*vy) - vx *= f - vy *= f - return [(x0 + 0.9*vx, y0 + 0.9*vy), - (x0 + 0.4*vy, y0 - 0.4*vx), - (x0 - 0.4*vy, y0 + 0.4*vx)] - except (ZeroDivisionError, ValueError): - n += 1 - -def beziercurve((x0,y0), (x1,y1), (x2,y2), (x3,y3), resolution=8): - result = [] - f = 1.0/(resolution-1) - for i in range(resolution): - t = f*i - t0 = (1-t)*(1-t)*(1-t) - t1 = t *(1-t)*(1-t) * 
3.0 - t2 = t * t *(1-t) * 3.0 - t3 = t * t * t - result.append((x0*t0 + x1*t1 + x2*t2 + x3*t3, - y0*t0 + y1*t1 + y2*t2 + y3*t3)) - return result - -def segmentdistance((x0,y0), (x1,y1), (x,y)): - "Distance between the point (x,y) and the segment (x0,y0)-(x1,y1)." - vx = x1-x0 - vy = y1-y0 - try: - l = math.sqrt(vx*vx+vy*vy) - vx /= l - vy /= l - dlong = vx*(x-x0) + vy*(y-y0) - except (ZeroDivisionError, ValueError): - dlong = -1 - if dlong < 0.0: - return math.sqrt((x-x0)*(x-x0) + (y-y0)*(y-y0)) - elif dlong > l: - return math.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1)) - else: - return abs(vy*(x-x0) - vx*(y-y0)) - -def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')): - result = [] - for word in re_word.findall(line): - if word.startswith('"'): - word = eval(word) - result.append(word) - return result - - -class GraphRenderer: - MARGIN = 0.6 - SCALEMIN = 3 - SCALEMAX = 100 - FONTCACHE = {} - - def __init__(self, screen, graphlayout, scale=75): - self.graphlayout = graphlayout - self.setscale(scale) - self.setoffset(0, 0) - self.screen = screen - self.textzones = [] - self.highlightwords = {} - - def setscale(self, scale): - scale = max(min(scale, self.SCALEMAX), self.SCALEMIN) - self.scale = float(scale) - w, h = self.graphlayout.boundingbox - self.margin = int(self.MARGIN * scale) - self.width = int(w * scale) + (2 * self.margin) - self.height = int(h * scale) + (2 * self.margin) - self.bboxh = h - size = int(15 * (scale-10) / 75) - self.font = self.getfont(size) - - def getfont(self, size): - if size in self.FONTCACHE: - return self.FONTCACHE[size] - elif size < 4: - self.FONTCACHE[size] = None - return None - else: - font = self.FONTCACHE[size] = pygame.font.Font(FONT, size) - return font - - def setoffset(self, offsetx, offsety): - "Set the (x,y) origin of the rectangle where the graph will be rendered." 
- self.ofsx = offsetx - self.margin - self.ofsy = offsety - self.margin - - def shiftoffset(self, dx, dy): - self.ofsx += dx - self.ofsy += dy - - def getcenter(self): - w, h = self.screen.get_size() - return self.revmap(w//2, h//2) - - def setcenter(self, x, y): - w, h = self.screen.get_size() - x, y = self.map(x, y) - self.shiftoffset(x-w//2, y-h//2) - - def shiftscale(self, factor, fix=None): - if fix is None: - fixx, fixy = self.screen.get_size() - fixx //= 2 - fixy //= 2 - else: - fixx, fixy = fix - x, y = self.revmap(fixx, fixy) - self.setscale(self.scale * factor) - newx, newy = self.map(x, y) - self.shiftoffset(newx - fixx, newy - fixy) - - def reoffset(self, swidth, sheight): - offsetx = noffsetx = self.ofsx - offsety = noffsety = self.ofsy - width = self.width - height = self.height - - # if it fits, center it, otherwise clamp - if width <= swidth: - noffsetx = (width - swidth) // 2 - else: - noffsetx = min(max(0, offsetx), width - swidth) - - if height <= sheight: - noffsety = (height - sheight) // 2 - else: - noffsety = min(max(0, offsety), height - sheight) - - self.ofsx = noffsetx - self.ofsy = noffsety - - def getboundingbox(self): - "Get the rectangle where the graph will be rendered." - return (-self.ofsx, -self.ofsy, self.width, self.height) - - def visible(self, x1, y1, x2, y2): - """Is any part of the box visible (i.e. within the bounding box)? - - We have to perform clipping ourselves because with big graphs the - coordinates may sometimes become longs and cause OverflowErrors - within pygame. 
- """ - return (x1 < self.width-self.ofsx and x2 > -self.ofsx and - y1 < self.height-self.ofsy and y2 > -self.ofsy) - - def map(self, x, y): - return (int(x*self.scale) - (self.ofsx - self.margin), - int((self.bboxh-y)*self.scale) - (self.ofsy - self.margin)) - - def revmap(self, px, py): - return ((px + (self.ofsx - self.margin)) / self.scale, - self.bboxh - (py + (self.ofsy - self.margin)) / self.scale) - - def draw_node_commands(self, node): - xcenter, ycenter = self.map(node.x, node.y) - boxwidth = int(node.w * self.scale) - boxheight = int(node.h * self.scale) - fgcolor = getcolor(node.color, (0,0,0)) - bgcolor = getcolor(node.fillcolor, (255,255,255)) - if node.highlight: - fgcolor = highlight_color(fgcolor) - bgcolor = highlight_color(bgcolor) - - text = node.label - lines = text.replace('\\l','\\l\n').replace('\r','\r\n').split('\n') - # ignore a final newline - if not lines[-1]: - del lines[-1] - wmax = 0 - hmax = 0 - commands = [] - bkgndcommands = [] - - if self.font is None: - if lines: - raw_line = lines[0].replace('\\l','').replace('\r','') - if raw_line: - for size in (12, 10, 8, 6, 4): - font = self.getfont(size) - img = TextSnippet(self, raw_line, (0, 0, 0), bgcolor, font=font) - w, h = img.get_size() - if (w >= boxwidth or h >= boxheight): - continue - else: - if w>wmax: wmax = w - def cmd(img=img, y=hmax, w=w): - img.draw(xcenter-w//2, ytop+y) - commands.append(cmd) - hmax += h - break - else: - for line in lines: - raw_line = line.replace('\\l','').replace('\r','') or ' ' - img = TextSnippet(self, raw_line, (0, 0, 0), bgcolor) - w, h = img.get_size() - if w>wmax: wmax = w - if raw_line.strip(): - if line.endswith('\\l'): - def cmd(img=img, y=hmax): - img.draw(xleft, ytop+y) - elif line.endswith('\r'): - def cmd(img=img, y=hmax, w=w): - img.draw(xright-w, ytop+y) - else: - def cmd(img=img, y=hmax, w=w): - img.draw(xcenter-w//2, ytop+y) - commands.append(cmd) - hmax += h - #hmax += 8 - - # we know the bounding box only now; setting these variables 
will - # have an effect on the values seen inside the cmd() functions above - xleft = xcenter - wmax//2 - xright = xcenter + wmax//2 - ytop = ycenter - hmax//2 - x = xcenter-boxwidth//2 - y = ycenter-boxheight//2 - - if node.shape == 'box': - rect = (x-1, y-1, boxwidth+2, boxheight+2) - def cmd(): - self.screen.fill(bgcolor, rect) - bkgndcommands.append(cmd) - def cmd(): - pygame.draw.rect(self.screen, fgcolor, rect, 1) - commands.append(cmd) - elif node.shape == 'octagon': - step = 1-math.sqrt(2)/2 - points = [(int(x+boxwidth*fx), int(y+boxheight*fy)) - for fx, fy in [(step,0), (1-step,0), - (1,step), (1,1-step), - (1-step,1), (step,1), - (0,1-step), (0,step)]] - def cmd(): - pygame.draw.polygon(self.screen, bgcolor, points, 0) - bkgndcommands.append(cmd) - def cmd(): - pygame.draw.polygon(self.screen, fgcolor, points, 1) - commands.append(cmd) - return bkgndcommands, commands - - def draw_commands(self): - nodebkgndcmd = [] - nodecmd = [] - for node in self.graphlayout.nodes.values(): - cmd1, cmd2 = self.draw_node_commands(node) - nodebkgndcmd += cmd1 - nodecmd += cmd2 - - edgebodycmd = [] - edgeheadcmd = [] - for edge in self.graphlayout.edges: - fgcolor = getcolor(edge.color, (0,0,0)) - if edge.highlight: - fgcolor = highlight_color(fgcolor) - points = [self.map(*xy) for xy in edge.bezierpoints()] - - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) - edgebodycmd.append(drawedgebody) - - points = [self.map(*xy) for xy in edge.arrowhead()] - if points: - def drawedgehead(points=points, fgcolor=fgcolor): - pygame.draw.polygon(self.screen, fgcolor, points, 0) - edgeheadcmd.append(drawedgehead) - - if edge.label: - x, y = self.map(edge.xl, edge.yl) - img = TextSnippet(self, edge.label, (0, 0, 0)) - w, h = img.get_size() - if self.visible(x-w//2, y-h//2, x+w//2, y+h//2): - def drawedgelabel(img=img, x1=x-w//2, y1=y-h//2): - img.draw(x1, y1) - edgeheadcmd.append(drawedgelabel) - - return edgebodycmd + 
nodebkgndcmd + edgeheadcmd + nodecmd - - def render(self): - bbox = self.getboundingbox() - self.screen.fill((224, 255, 224), bbox) - - # gray off-bkgnd areas - ox, oy, width, height = bbox - dpy_width, dpy_height = self.screen.get_size() - gray = (128, 128, 128) - if ox > 0: - self.screen.fill(gray, (0, 0, ox, dpy_height)) - if oy > 0: - self.screen.fill(gray, (0, 0, dpy_width, oy)) - w = dpy_width - (ox + width) - if w > 0: - self.screen.fill(gray, (dpy_width-w, 0, w, dpy_height)) - h = dpy_height - (oy + height) - if h > 0: - self.screen.fill(gray, (0, dpy_height-h, dpy_width, h)) - - # draw the graph and record the position of texts - del self.textzones[:] - for cmd in self.draw_commands(): - cmd() - - def findall(self, searchstr): - """Return an iterator for all nodes and edges that contain a searchstr. - """ - for item in self.graphlayout.nodes.itervalues(): - if item.label and searchstr in item.label: - yield item - for item in self.graphlayout.edges: - if item.label and searchstr in item.label: - yield item - - def at_position(self, (x, y)): - """Figure out the word under the cursor.""" - for rx, ry, rw, rh, word in self.textzones: - if rx <= x < rx+rw and ry <= y < ry+rh: - return word - return None - - def node_at_position(self, (x, y)): - """Return the Node under the cursor.""" - x, y = self.revmap(x, y) - for node in self.graphlayout.nodes.itervalues(): - if 2.0*abs(x-node.x) <= node.w and 2.0*abs(y-node.y) <= node.h: - return node - return None - - def edge_at_position(self, (x, y), distmax=14): - """Return the Edge near the cursor.""" - # XXX this function is very CPU-intensive and makes the display kinda sluggish - distmax /= self.scale - xy = self.revmap(x, y) - closest_edge = None - for edge in self.graphlayout.edges: - pts = edge.bezierpoints() - for i in range(1, len(pts)): - d = segmentdistance(pts[i-1], pts[i], xy) - if d < distmax: - distmax = d - closest_edge = edge - return closest_edge - - -class TextSnippet: - - def __init__(self, 
renderer, text, fgcolor, bgcolor=None, font=None): - self.renderer = renderer - self.imgs = [] - self.parts = [] - if font is None: - font = renderer.font - if font is None: - return - parts = self.parts - for word in re_nonword.split(text): - if not word: - continue - if word in renderer.highlightwords: - fg, bg = renderer.highlightwords[word] - bg = bg or bgcolor - else: - fg, bg = fgcolor, bgcolor - parts.append((word, fg, bg)) - # consolidate sequences of words with the same color - for i in range(len(parts)-2, -1, -1): - if parts[i][1:] == parts[i+1][1:]: - word, fg, bg = parts[i] - parts[i] = word + parts[i+1][0], fg, bg - del parts[i+1] - # delete None backgrounds - for i in range(len(parts)): - if parts[i][2] is None: - parts[i] = parts[i][:2] - # render parts - i = 0 - while i < len(parts): - part = parts[i] - word = part[0] - antialias = not re_nonword.match(word) # SDL bug with anti-aliasing - try: - img = font.render(word, antialias, *part[1:]) - except pygame.error: - del parts[i] # Text has zero width - else: - self.imgs.append(img) - i += 1 - - def get_size(self): - if self.imgs: - sizes = [img.get_size() for img in self.imgs] - return sum([w for w,h in sizes]), max([h for w,h in sizes]) - else: - return 0, 0 - - def draw(self, x, y): - for part, img in zip(self.parts, self.imgs): - word = part[0] - self.renderer.screen.blit(img, (x, y)) - w, h = img.get_size() - self.renderer.textzones.append((x, y, w, h, word)) - x += w - - -try: - sum # 2.3 only -except NameError: - def sum(lst): - total = 0 - for item in lst: - total += lst - return total diff --git a/pypy/translator/tool/pygame/graphclient.py b/pypy/translator/tool/pygame/graphclient.py deleted file mode 100644 index a5aba57793..0000000000 --- a/pypy/translator/tool/pygame/graphclient.py +++ /dev/null @@ -1,79 +0,0 @@ -#! /usr/bin/env python -""" -Client for a graph server (either in-process or over a socket). 
-""" - -import autopath -from pypy.translator.tool.pygame.drawgraph import GraphLayout -from pypy.translator.tool.graphserver import send_msg, recv_msg, MissingPage -from pypy.tool.udir import udir -from py.process import cmdexec - - -DOT_FILE = udir.join('graph.dot') -PLAIN_FILE = udir.join('graph.plain') - - -class ClientGraphLayout(GraphLayout): - - def __init__(self, connexion, key, dot, links, **ignored): - # generate a temporary .dot file and call dot on it - DOT_FILE.write(dot) - cmdexec('dot -Tplain %s>%s' % (str(DOT_FILE),str(PLAIN_FILE))) - GraphLayout.__init__(self, PLAIN_FILE) - self.connexion = connexion - self.key = key - self.links.update(links) - - def followlink(self, name): - return self.connexion.download(self.key, name) - - def reload(self): - return self.connexion.download(self.key) - - -class InProcessConnexion: - - def download(self, page, link=None): - if link is not None: - try: - page = page.content().followlink(link) - except KeyError: - page = MissingPage() - key = page - page = page.content() - return ClientGraphLayout(self, key, page.source, page.links) - - -class SocketConnexion: - - def __init__(self, s): - self.s = s - - def download(self, key, link=None): - send_msg(self.s, (key, link)) - data = recv_msg(self.s) - return ClientGraphLayout(self, **data) - - -def get_layout(homepage): - return InProcessConnexion().download(homepage) - -def get_remote_layout(hostname, port=8888): - import socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect((hostname, port)) - return SocketConnexion(s).download(0) - - -if __name__ == '__main__': - import sys - if len(sys.argv) != 2 or sys.argv[1].count(':') != 1: - print >> sys.stderr, 'Connects to a graph server like goal/translate_pypy.' 
- print >> sys.stderr, 'Usage: %s hostname:port' % (sys.argv[0],) - print >> sys.stderr, ' or %s :port' % (sys.argv[0],) - sys.exit(2) - hostname, port = sys.argv[1].split(':') - port = int(port) - layout = get_remote_layout(hostname, port) - layout.display() diff --git a/pypy/translator/tool/pygame/graphdisplay.py b/pypy/translator/tool/pygame/graphdisplay.py deleted file mode 100644 index f10332b7f9..0000000000 --- a/pypy/translator/tool/pygame/graphdisplay.py +++ /dev/null @@ -1,723 +0,0 @@ -from __future__ import generators -import autopath -import os, time, sys -import pygame -from pygame.locals import * -from pypy.translator.tool.pygame.drawgraph import GraphRenderer -from pypy.translator.tool.pygame.drawgraph import Node, Edge - - -METAKEYS = dict([ - (ident[len('KMOD_'):].lower(), getattr(pygame.locals, ident)) - for ident in dir(pygame.locals) if ident.startswith('KMOD_') and ident != 'KMOD_NONE' -]) - -if sys.platform == 'darwin': - PMETA = 'lmeta', 'rmeta' -else: - PMETA = 'lalt', 'ralt', 'lctrl', 'rctrl' - -METAKEYS['meta'] = PMETA -METAKEYS['shift'] = 'lshift', 'rshift' - -KEYS = dict([ - (ident[len('K_'):].lower(), getattr(pygame.locals, ident)) - for ident in dir(pygame.locals) if ident.startswith('K_') -]) - -KEYS['plus'] = ('=', '+', '.') -KEYS['quit'] = ('q', 'escape') -KEYS['help'] = ('h', '?', 'f1') - -def GET_KEY(key): - if len(key) == 1: - return key - return KEYS[key] - -def permute_mods(base, args): - if not args: - yield base - return - first, rest = args[0], args[1:] - for val in first: - for rval in permute_mods(base | val, rest): - yield rval - -class Display(object): - - def __init__(self, (w,h)=(800,680)): - pygame.init() - self.resize((w,h)) - - def resize(self, (w,h)): - self.width = w - self.height = h - self.screen = pygame.display.set_mode((w, h), HWSURFACE|RESIZABLE, 32) - - -class GraphDisplay(Display): - STATUSBARFONT = os.path.join(autopath.this_dir, 'VeraMoBd.ttf') - ANIM_STEP = 0.07 - KEY_REPEAT = (500, 30) - STATUSBAR_ALPHA 
= 0.75 - STATUSBAR_FGCOLOR = (255, 255, 80) - STATUSBAR_BGCOLOR = (128, 0, 0) - STATUSBAR_OVERFLOWCOLOR = (255, 0, 0) - HELP_ALPHA = 0.95 - HELP_FGCOLOR = (255, 255, 80) - HELP_BGCOLOR = (0, 128, 0) - INPUT_ALPHA = 0.75 - INPUT_FGCOLOR = (255, 255, 80) - INPUT_BGCOLOR = (0, 0, 128) - - KEYS = { - 'meta -' : ('zoom', 0.5), - '-' : ('zoom', 0.5), - 'meta plus' : ('zoom', 2.0), - 'plus' : ('zoom', 2.0), - 'meta 0' : 'zoom_actual_size', - '0' : 'zoom_actual_size', - 'meta 1' : 'zoom_to_fit', - '1' : 'zoom_to_fit', - 'meta f4' : 'quit', - 'meta quit' : 'quit', - 'quit' : 'quit', - 'meta right' : 'layout_forward', - 'meta left': 'layout_back', - 'backspace' : 'layout_back', - 'f': 'search', - '/': 'search', - 'n': 'find_next', - 'p': 'find_prev', - 'r': 'reload', - 'left' : ('pan', (-1, 0)), - 'right' : ('pan', (1, 0)), - 'up' : ('pan', (0, -1)), - 'down' : ('pan', (0, 1)), - 'shift left' : ('fast_pan', (-1, 0)), - 'shift right' : ('fast_pan', (1, 0)), - 'shift up' : ('fast_pan', (0, -1)), - 'shift down' : ('fast_pan', (0, 1)), - 'help': 'help', - } - - HELP_MSG = """ - Key bindings: - - +, = or . Zoom in - - Zoom out - 1 Zoom to fit - 0 Actual size - - Arrows Scroll - Shift+Arrows Scroll faster - - Backspace Go back in history - Meta Left Go back in history - Meta Right Go forward in history - R Reload the page - - F or / Search for text - N Find next occurrence - P Find previous occurrence - - F1, H or ? 
This help message - - Q or Esc Quit - - Mouse bindings: - - Click on objects to move around - Drag with the left mouse button to zoom in/out - Drag with the right mouse button to scroll - """.replace('\n ', '\n').strip() # poor man's dedent - - - def __init__(self, layout): - super(GraphDisplay, self).__init__() - self.font = pygame.font.Font(self.STATUSBARFONT, 16) - self.viewers_history = [] - self.forward_viewers_history = [] - self.highlight_word = None - self.highlight_obj = None - self.viewer = None - self.method_cache = {} - self.key_cache = {} - self.ascii_key_cache = {} - self.status_bar_height = 0 - self.searchstr = None - self.searchpos = 0 - self.searchresults = [] - self.initialize_keys() - self.setlayout(layout) - - def initialize_keys(self): - pygame.key.set_repeat(*self.KEY_REPEAT) - for strnames, methodname in self.KEYS.iteritems(): - names = strnames.split() - if not isinstance(methodname, basestring): - methodname, args = methodname[0], methodname[1:] - else: - args = () - method = getattr(self, methodname, None) - if method is None: - print 'Can not implement key mapping %r, %s.%s does not exist' % ( - strnames, self.__class__.__name__, methodname) - continue - - mods = [] - basemod = 0 - keys = [] - for name in names: - if name in METAKEYS: - val = METAKEYS[name] - if not isinstance(val, int): - mods.append(tuple([METAKEYS[k] for k in val])) - else: - basemod |= val - else: - val = GET_KEY(name) - assert len(keys) == 0 - if not isinstance(val, (int, basestring)): - keys.extend([GET_KEY(k) for k in val]) - else: - keys.append(val) - assert keys - for key in keys: - if isinstance(key, int): - for mod in permute_mods(basemod, mods): - self.key_cache[(key, mod)] = (method, args) - else: - for mod in permute_mods(basemod, mods): - char = key.lower() - mod = mod & ~KMOD_SHIFT - self.ascii_key_cache[(char, mod)] = (method, args) - - def help(self): - """Show a help window and wait for a key or a mouse press.""" - margin_x = margin_y = 64 - padding_x = 
padding_y = 8 - fgcolor = self.HELP_FGCOLOR - bgcolor = self.HELP_BGCOLOR - helpmsg = self.HELP_MSG - width = self.width - 2*margin_x - height = self.height - 2*margin_y - lines = rendertext(helpmsg, self.font, fgcolor, width - 2*padding_x, - height - 2*padding_y) - block = pygame.Surface((width, height), SWSURFACE | SRCALPHA) - block.fill(bgcolor) - sx = padding_x - sy = padding_y - for img in lines: - w, h = img.get_size() - block.blit(img, (sx, sy)) - sy += h - block.set_alpha(int(255 * self.HELP_ALPHA)) - self.screen.blit(block, (margin_x, margin_y)) - - pygame.display.flip() - while True: - e = pygame.event.wait() - if e.type in (MOUSEBUTTONDOWN, KEYDOWN, QUIT): - break - self.must_redraw = True - - def input(self, prompt): - """Ask the user to input something. - - Returns the string that the user entered, or None if the user pressed - Esc. - """ - - def draw(text): - margin_x = margin_y = 0 - padding_x = padding_y = 8 - fgcolor = self.INPUT_FGCOLOR - bgcolor = self.INPUT_BGCOLOR - width = self.width - 2*margin_x - lines = renderline(text, self.font, fgcolor, width - 2*padding_x) - height = totalheight(lines) + 2 * padding_y - block = pygame.Surface((width, height), SWSURFACE | SRCALPHA) - block.fill(bgcolor) - sx = padding_x - sy = padding_y - for img in lines: - w, h = img.get_size() - block.blit(img, (sx, sy)) - sy += h - block.set_alpha(int(255 * self.INPUT_ALPHA)) - # This can be slow. It would be better to take a screenshot - # and use it as the background. 
- self.viewer.render() - if self.statusbarinfo: - self.drawstatusbar() - self.screen.blit(block, (margin_x, margin_y)) - pygame.display.flip() - - draw(prompt) - text = "" - self.must_redraw = True - while True: - events = [pygame.event.wait()] - events.extend(pygame.event.get()) - old_text = text - for e in events: - if e.type == QUIT: - return None - elif e.type == KEYDOWN: - if e.key == K_ESCAPE: - return None - elif e.key == K_RETURN: - return text - elif e.key == K_BACKSPACE: - text = text[:-1] - elif e.unicode and ord(e.unicode) >= ord(' '): - text += e.unicode - if text != old_text: - draw(prompt + text) - - def search(self): - searchstr = self.input('Find: ') - if not searchstr: - return - self.searchstr = searchstr - self.searchpos = -1 - self.searchresults = list(self.viewer.findall(self.searchstr)) - self.find_next() - - def find_next(self): - if not self.searchstr: - return - if self.searchpos + 1 >= len(self.searchresults): - self.setstatusbar('Not found: %s' % self.searchstr) - return - self.searchpos += 1 - self.highlight_found_item() - - def find_prev(self): - if not self.searchstr: - return - if self.searchpos - 1 < 0: - self.setstatusbar('Not found: %s' % self.searchstr) - return - self.searchpos -= 1 - self.highlight_found_item() - - def highlight_found_item(self): - item = self.searchresults[self.searchpos] - self.sethighlight(obj=item) - msg = 'Found %%s containing %s (%d/%d)' % (self.searchstr, - self.searchpos+1, len(self.searchresults)) - if isinstance(item, Node): - self.setstatusbar(msg % 'node') - self.look_at_node(item, keep_highlight=True) - elif isinstance(item, Edge): - self.setstatusbar(msg % 'edge') - self.look_at_edge(item, keep_highlight=True) - else: - # should never happen - self.setstatusbar(msg % item) - - def setlayout(self, layout): - if self.viewer: - self.viewers_history.append(self.viewer) - del self.forward_viewers_history[:] - self.layout = layout - self.viewer = GraphRenderer(self.screen, layout) - self.searchpos = 0 - 
self.searchresults = [] - self.zoom_to_fit() - - def zoom_actual_size(self): - self.viewer.shiftscale(float(self.viewer.SCALEMAX) / self.viewer.scale) - self.updated_viewer() - - def calculate_zoom_to_fit(self): - return min(float(self.width) / self.viewer.width, - float(self.height) / self.viewer.height, - float(self.viewer.SCALEMAX) / self.viewer.scale) - - def zoom_to_fit(self): - """ - center and scale to view the whole graph - """ - - f = self.calculate_zoom_to_fit() - self.viewer.shiftscale(f) - self.updated_viewer() - - def zoom(self, scale): - self.viewer.shiftscale(max(scale, self.calculate_zoom_to_fit())) - self.updated_viewer() - - def reoffset(self): - self.viewer.reoffset(self.width, self.height) - - def pan(self, (x, y)): - self.viewer.shiftoffset(x * (self.width // 8), y * (self.height // 8)) - self.updated_viewer() - - def fast_pan(self, (x, y)): - self.pan((x * 4, y * 4)) - - def update_status_bar(self): - self.statusbarinfo = None - self.must_redraw = True - if self.viewers_history: - info = 'Press Backspace to go back to previous screen' - else: - info = 'Press H for help' - self.setstatusbar(info) - - def updated_viewer(self, keep_highlight=False): - self.reoffset() - if not keep_highlight: - self.sethighlight() - self.update_status_bar() - self.must_redraw = True - - def layout_back(self): - if self.viewers_history: - self.forward_viewers_history.append(self.viewer) - self.viewer = self.viewers_history.pop() - self.layout = self.viewer.graphlayout - self.updated_viewer() - - def layout_forward(self): - if self.forward_viewers_history: - self.viewers_history.append(self.viewer) - self.viewer = self.forward_viewers_history.pop() - self.layout = self.viewer.graphlayout - self.updated_viewer() - - def reload(self): - self.setstatusbar('reloading...') - self.redraw_now() - newlayout = self.layout.reload() - self.setlayout(newlayout) - - def setstatusbar(self, text, fgcolor=None, bgcolor=None): - info = (text, fgcolor or self.STATUSBAR_FGCOLOR, 
bgcolor or self.STATUSBAR_BGCOLOR) - if info != self.statusbarinfo: - self.statusbarinfo = info - self.must_redraw = True - - def drawstatusbar(self): - text, fgcolor, bgcolor = self.statusbarinfo - maxheight = self.height / 2 - lines = rendertext(text, self.font, fgcolor, self.width, maxheight, - self.STATUSBAR_OVERFLOWCOLOR) - totalh = totalheight(lines) - y = self.height - totalh - self.status_bar_height = totalh + 16 - block = pygame.Surface((self.width, self.status_bar_height), SWSURFACE | SRCALPHA) - block.fill(bgcolor) - sy = 16 - for img in lines: - w, h = img.get_size() - block.blit(img, ((self.width-w)//2, sy-8)) - sy += h - block.set_alpha(int(255 * self.STATUSBAR_ALPHA)) - self.screen.blit(block, (0, y-16)) - - def notifymousepos(self, pos): - word = self.viewer.at_position(pos) - if word in self.layout.links: - info = self.layout.links[word] - self.setstatusbar(info) - self.sethighlight(word) - return - node = self.viewer.node_at_position(pos) - if node: - self.setstatusbar(shortlabel(node.label)) - self.sethighlight(obj=node) - return - edge = self.viewer.edge_at_position(pos) - if edge: - info = '%s -> %s' % (shortlabel(edge.tail.label), - shortlabel(edge.head.label)) - if edge.label: - info += '\n' + shortlabel(edge.label) - self.setstatusbar(info) - self.sethighlight(obj=edge) - return - self.sethighlight() - - def notifyclick(self, pos): - word = self.viewer.at_position(pos) - if word in self.layout.links: - self.setstatusbar('loading...') - self.redraw_now() - newlayout = self.layout.followlink(word) - if newlayout is not None: - self.setlayout(newlayout) - return - self.setstatusbar('') - node = self.viewer.node_at_position(pos) - if node: - self.look_at_node(node) - else: - edge = self.viewer.edge_at_position(pos) - if edge: - if (self.distance_to_node(edge.head) >= - self.distance_to_node(edge.tail)): - self.look_at_node(edge.head) - else: - self.look_at_node(edge.tail) - - def sethighlight(self, word=None, obj=None): - # The initialization of 
self.viewer.highlightwords should probably be - # moved to setlayout() - self.viewer.highlightwords = {} - for name in self.layout.links: - self.viewer.highlightwords[name] = ((128,0,0), None) - if word: - self.viewer.highlightwords[word] = ((255,255,80), (128,0,0)) - - if word == self.highlight_obj and obj is self.highlight_obj: - return # Nothing has changed, so there's no need to redraw - - if self.highlight_obj is not None: - self.highlight_obj.sethighlight(False) - if obj is not None: - obj.sethighlight(True) - self.highlight_word = word - self.highlight_obj = obj - self.must_redraw = True - - def animation(self, expectedtime=0.6): - start = time.time() - step = 0.0 - n = 0 - while True: - step += self.ANIM_STEP - if step >= expectedtime: - break - yield step / expectedtime - n += 1 - now = time.time() - frametime = (now-start) / n - self.ANIM_STEP = self.ANIM_STEP * 0.9 + frametime * 0.1 - yield 1.0 - - def distance_to_node(self, node): - cx1, cy1 = self.viewer.getcenter() - cx2, cy2 = node.x, node.y - return (cx2-cx1)*(cx2-cx1) + (cy2-cy1)*(cy2-cy1) - - def look_at_node(self, node, keep_highlight=False): - """Shift the node in view.""" - self.look_at(node.x, node.y, node.w, node.h, keep_highlight) - - def look_at_edge(self, edge, keep_highlight=False): - """Shift the edge's label into view.""" - points = edge.bezierpoints() - xmin = min([x for (x, y) in points]) - xmax = max([x for (x, y) in points]) - ymin = min([y for (x, y) in points]) - ymax = max([y for (x, y) in points]) - x = (xmin + xmax) / 2 - y = (ymin + ymax) / 2 - w = max(1, xmax - xmin) - h = max(1, ymax - ymin) - self.look_at(x, y, w, h, keep_highlight) - - def look_at(self, targetx, targety, targetw, targeth, - keep_highlight=False): - """Shift the node in view.""" - endscale = min(float(self.width-40) / targetw, - float(self.height-40) / targeth, - 75) - startscale = self.viewer.scale - cx1, cy1 = self.viewer.getcenter() - cx2, cy2 = targetx, targety - moving = (abs(startscale-endscale) + 
abs(cx1-cx2) + abs(cy1-cy2) - > 0.4) - if moving: - # if the target is far off the window, reduce scale along the way - tx, ty = self.viewer.map(cx2, cy2) - offview = max(-tx, -ty, tx-self.width, ty-self.height) - middlescale = endscale * (0.999 ** offview) - if offview > 150 and middlescale < startscale: - bumpscale = 4.0 * (middlescale - 0.5*(startscale+endscale)) - else: - bumpscale = 0.0 - if not keep_highlight: - self.statusbarinfo = None - self.sethighlight() - for t in self.animation(): - self.viewer.setscale(startscale*(1-t) + endscale*t + - bumpscale*t*(1-t)) - self.viewer.setcenter(cx1*(1-t) + cx2*t, cy1*(1-t) + cy2*t) - self.updated_viewer(keep_highlight=keep_highlight) - self.redraw_now() - return moving - - def peek(self, typ): - for event in self.events: - if event.type == typ: - return True - return False - - def process_event(self, event): - method = self.method_cache.get(event.type, KeyError) - if method is KeyError: - method = getattr(self, 'process_%s' % (pygame.event.event_name(event.type),), None) - self.method_cache[method] = method - if method is not None: - method(event) - - def process_MouseMotion(self, event): - if self.peek(MOUSEMOTION): - return - if self.dragging: - if (abs(event.pos[0] - self.click_origin[0]) + - abs(event.pos[1] - self.click_origin[1])) > 12: - self.click_time = None - dx = event.pos[0] - self.dragging[0] - dy = event.pos[1] - self.dragging[1] - if event.buttons[0]: # left mouse button - self.zoom(1.003 ** (dx+dy)) - else: - self.viewer.shiftoffset(-2*dx, -2*dy) - self.updated_viewer() - self.dragging = event.pos - self.must_redraw = True - else: - self.notifymousepos(event.pos) - - def process_MouseButtonDown(self, event): - self.dragging = self.click_origin = event.pos - self.click_time = time.time() - pygame.event.set_grab(True) - - def process_MouseButtonUp(self, event): - self.dragging = None - pygame.event.set_grab(False) - if self.click_time is not None and abs(time.time() - self.click_time) < 1: - # click (no 
significant dragging) - self.notifyclick(self.click_origin) - self.update_status_bar() - self.click_time = None - self.notifymousepos(event.pos) - - def process_KeyDown(self, event): - method, args = self.key_cache.get((event.key, event.mod), (None, None)) - if method is None and event.unicode: - char = event.unicode.lower() - mod = event.mod & ~ KMOD_SHIFT - method, args = self.ascii_key_cache.get((char, mod), (None, None)) - if method is not None: - method(*args) - - def process_VideoResize(self, event): - # short-circuit if there are more resize events pending - if self.peek(VIDEORESIZE): - return - self.resize(event.size) - self.must_redraw = True - - def process_Quit(self, event): - self.quit() - - def async_quit(self): - pygame.event.post(pygame.event.Event(QUIT)) - - def quit(self): - raise StopIteration - - def redraw_now(self): - self.viewer.render() - if self.statusbarinfo: - self.drawstatusbar() - else: - self.status_bar_height = 0 - pygame.display.flip() - self.must_redraw = False - - def run(self): - self.dragging = self.click_origin = self.click_time = None - events = self.events = [] - try: - - while True: - - if self.must_redraw and not events: - self.redraw_now() - - if not events: - events.append(pygame.event.wait()) - events.extend(pygame.event.get()) - - self.process_event(events.pop(0)) - - except StopIteration: - pass - - # cannot safely close and re-open the display, depending on - # Pygame version and platform. - pygame.display.set_mode((self.width,1)) - - -def shortlabel(label): - """Shorten a graph node label.""" - return label.replace('\\l', '').splitlines()[0] - - -def renderline(text, font, fgcolor, width, maxheight=sys.maxint, - overflowcolor=None): - """Render a single line of text into a list of images. - - Performs word wrapping. 
- """ - if overflowcolor is None: - overflowcolor = fgcolor - words = text.split(' ') - lines = [] - while words: - line = words.pop(0) - img = font.render(line or ' ', 1, fgcolor) - while words: - longerline = line + ' ' + words[0] - longerimg = font.render(longerline, 1, fgcolor) - w, h = longerimg.get_size() - if w > width: - break - words.pop(0) - line = longerline - img = longerimg - w, h = img.get_size() - if h > maxheight: - img = font.render('...', 1, overflowcolor) - w, h = img.get_size() - while lines and h > maxheight: - maxheight += lines.pop().get_size()[1] - lines.append(img) - break - maxheight -= h - lines.append(img) - return lines - - -def rendertext(text, font, fgcolor, width, maxheight=sys.maxint, - overflowcolor=None): - """Render a multiline string into a list of images. - - Performs word wrapping for each line individually.""" - lines = [] - for line in text.splitlines(): - l = renderline(line, font, fgcolor, width, maxheight, overflowcolor) - lines.extend(l) - maxheight -= totalheight(l) - if maxheight <= 0: - break - return lines - - -def totalheight(lines): - """Calculate the total height of a list of images.""" - totalh = 0 - for img in lines: - w, h = img.get_size() - totalh += h - return totalh diff --git a/pypy/translator/tool/sbclinvoke.sh b/pypy/translator/tool/sbclinvoke.sh deleted file mode 100644 index 81a90be458..0000000000 --- a/pypy/translator/tool/sbclinvoke.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -# Copy this to your PATH -sbcl --noinform --disable-debugger --load $1 --eval '(quit)' diff --git a/pypy/translator/tool/stdoutcapture.py b/pypy/translator/tool/stdoutcapture.py deleted file mode 100644 index 1ad1b29b52..0000000000 --- a/pypy/translator/tool/stdoutcapture.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -A quick hack to capture stdout/stderr. -""" - -import os, sys - - -class Capture: - - def __init__(self, mixed_out_err = False): - "Start capture of the Unix-level stdout and stderr." 
- if (not hasattr(os, 'tmpfile') or - not hasattr(os, 'dup') or - not hasattr(os, 'dup2') or - not hasattr(os, 'fdopen')): - self.dummy = 1 - else: - self.dummy = 0 - # make new stdout/stderr files if needed - self.localoutfd = os.dup(1) - self.localerrfd = os.dup(2) - if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1: - self.saved_stdout = sys.stdout - sys.stdout = os.fdopen(self.localoutfd, 'w', 1) - else: - self.saved_stdout = None - if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2: - self.saved_stderr = sys.stderr - sys.stderr = os.fdopen(self.localerrfd, 'w', 0) - else: - self.saved_stderr = None - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() - os.dup2(self.tmpout.fileno(), 1) - os.dup2(self.tmperr.fileno(), 2) - - def done(self): - "End capture and return the captured text (stdoutfile, stderrfile)." - if self.dummy: - import cStringIO - return cStringIO.StringIO(), cStringIO.StringIO() - else: - os.dup2(self.localoutfd, 1) - os.dup2(self.localerrfd, 2) - if self.saved_stdout is not None: - f = sys.stdout - sys.stdout = self.saved_stdout - f.close() - else: - os.close(self.localoutfd) - if self.saved_stderr is not None: - f = sys.stderr - sys.stderr = self.saved_stderr - f.close() - else: - os.close(self.localerrfd) - self.tmpout.seek(0) - self.tmperr.seek(0) - return self.tmpout, self.tmperr - - -if __name__ == '__main__': - # test - c = Capture() - try: - os.system('echo hello') - finally: - fout, ferr = c.done() - print 'Output:', `fout.read()` - print 'Error:', `ferr.read()` diff --git a/pypy/translator/tool/traceann.py b/pypy/translator/tool/traceann.py deleted file mode 100644 index bf08ce92ab..0000000000 --- a/pypy/translator/tool/traceann.py +++ /dev/null @@ -1,55 +0,0 @@ -""" - -- annotate and translate snippet producing tracing of calls -to AnnotationSet and RPythonAnnotator -- print bindings and final annotation set -and pyrex - -- display flow graph - -call 
it like: - -traceann <snippet-name> <types...> >result.txt 2>trace.txt - -""" - -import sys - -import autopath - -from pypy.translator.translator import * -from pypy.translator.test import snippet as test - -#from pypy.translator.tool import tracer -#tracer.trace(AnnotationSet) -#tracer.trace(RPythonAnnotator) - -try: - snippet_name = sys.argv[1] -except IndexError: - snippet_name = "call_five" - -argtypes = [] - -for argtype in sys.argv[2:]: - argtypes.append(eval(argtype)) - -t = Translator(getattr(test,snippet_name)) -t.simplify() -a = t.annotate(argtypes) -lines = [] -for key, value in a.bindings.items(): - lines.append('%r: %r' % (key, value)) -#for cl, attrdict in a.classes.items(): -# lines.append('%s: %r' % (cl.__name__,attrdict)) -lines.sort() -for line in lines: - print line -print '-'*50 -print a.heap -sys.stdout.flush() - -print '-'*50 -print t.pyrex() - -t.gv() diff --git a/pypy/translator/tool/tracer.py b/pypy/translator/tool/tracer.py deleted file mode 100644 index a1de899f85..0000000000 --- a/pypy/translator/tool/tracer.py +++ /dev/null @@ -1,42 +0,0 @@ -import types, sys -from pypy.annotation.model import SomeValue, debugname -from pypy.annotation.annset import AnnotationSet -from pypy.translator.annrpython import RPythonAnnotator - -indent1 = [''] - -def show(n): - if isinstance(n, AnnotationSet): - return 'heap' - elif isinstance(n, RPythonAnnotator): - return 'rpyann' - else: - return repr(n) - -def trace(o): - if isinstance(o, types.ClassType): - for key, value in o.__dict__.items(): - o.__dict__[key] = trace(value) - elif isinstance(o, types.FunctionType): - d = {'o': o, 'show': show, 'indent1': indent1, 'stderr': sys.stderr} - exec """ -def %s(*args, **kwds): - indent, = indent1 - rargs = [show(a) for a in args] - for kw, value in kwds.items(): - rargs.append('%%s=%%r' %% (kw, value)) - print >> stderr, indent + %r + '(%%s)' %% ', '.join(rargs) - indent1[0] += '| ' - try: - result = o(*args, **kwds) - except Exception, e: - indent1[0] = 
indent - print >> stderr, indent + '+--> %%s: %%s' %% (e.__class__.__name__, e) - raise - indent1[0] = indent - if result is not None: - print >> stderr, indent + '+-->', show(result) - return result -result = %s -""" % (o.__name__, o.__name__, o.__name__) in d - return d['result'] diff --git a/pypy/translator/transform.py b/pypy/translator/transform.py deleted file mode 100644 index 2e84ef4699..0000000000 --- a/pypy/translator/transform.py +++ /dev/null @@ -1,367 +0,0 @@ -"""Flow Graph Transformation - -The difference between simplification and transformation is that -transformation may introduce new space operation. -""" - -import types -from pypy.objspace.flow.model import SpaceOperation -from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.translator.annrpython import CannotSimplify -from pypy.annotation import model as annmodel - -# XXX: Lots of duplicated codes. Fix this! - -# ---------------------------------------------------------------------- -# The 'call_args' operation is the strangest one. The meaning of its -# arguments is as follows: -# -# call_args(<callable>, <shape>, <arg0>, <arg1>, <arg2>...) -# -# The shape must be a constant object, which describes how the remaining -# arguments are regrouped. The class pypy.interpreter.argument.Arguments -# has a method 'fromshape(shape, list-of-args)' that reconstructs a complete -# Arguments instance from this information. 
Don't try to interpret the -# shape anywhere else, but for reference, it is a 3-tuple: -# (number-of-pos-arg, tuple-of-keyword-names, flag-presence-of-*-arg) -# ---------------------------------------------------------------------- - -# [a] * b -# --> -# c = newlist(a) -# d = mul(c, int b) -# --> -# d = alloc_and_set(b, a) - -def fully_annotated_blocks(self): - """Ignore blocked blocks.""" - for block, is_annotated in self.annotated.iteritems(): - if is_annotated: - yield block - -def transform_allocate(self): - """Transforms [a] * b to alloc_and_set(b, a) where b is int.""" - for block in fully_annotated_blocks(self): - operations = block.operations[:] - n_op = len(operations) - for i in range(0, n_op-1): - op1 = operations[i] - op2 = operations[i+1] - if (op1.opname == 'newlist' and - len(op1.args) == 1 and - op2.opname == 'mul' and - op1.result is op2.args[0] and - self.gettype(op2.args[1]) is int): - new_op = SpaceOperation('alloc_and_set', - (op2.args[1], op1.args[0]), - op2.result) - block.operations[i+1:i+2] = [new_op] - -# a[b:c] -# --> -# d = newslice(b, c, None) -# e = getitem(a, d) -# --> -# e = getslice(a, b, c) - -def transform_slice(self): - """Transforms a[b:c] to getslice(a, b, c).""" - for block in fully_annotated_blocks(self): - operations = block.operations[:] - n_op = len(operations) - for i in range(0, n_op-1): - op1 = operations[i] - op2 = operations[i+1] - if (op1.opname == 'newslice' and - self.gettype(op1.args[2]) is types.NoneType and - op2.opname == 'getitem' and - op1.result is op2.args[1]): - new_op = SpaceOperation('getslice', - (op2.args[0], op1.args[0], op1.args[1]), - op2.result) - block.operations[i+1:i+2] = [new_op] - -# a(*b) -# --> -# c = newtuple(*b) -# d = newdict() -# e = call(function a, c, d) -# --> -# e = simple_call(a, *b) - -## REMOVED: now FlowObjSpace produces 'call_args' operations only -##def transform_simple_call(self): -## """Transforms call(a, (...), {}) to simple_call(a, ...)""" -## for block in self.annotated: 
-## known_vars = block.inputargs[:] -## operations = [] -## for op in block.operations: -## try: -## if op.opname != 'call': -## raise CannotSimplify -## varargs_cell = self.binding(op.args[1]) -## varkwds_cell = self.binding(op.args[2]) -## arg_cells = self.decode_simple_call(varargs_cell, -## varkwds_cell) -## if arg_cells is None: -## raise CannotSimplify - -## args = [self.reverse_binding(known_vars, c) for c in arg_cells] -## args.insert(0, op.args[0]) -## new_ops = [SpaceOperation('simple_call', args, op.result)] - -## except CannotSimplify: -## new_ops = [op] - -## for op in new_ops: -## operations.append(op) -## known_vars.append(op.result) - -## block.operations = operations - -def transform_dead_op_vars(self): - """Remove dead operations and variables that are passed over a link - but not used in the target block.""" - # the set of operations that can safely be removed (no side effects) - CanRemove = {'newtuple': True, - 'newlist': True, - 'newdict': True, - 'is_': True, - 'is_true': True} - read_vars = {} # set of variables really used - variable_flow = {} # map {Var: list-of-Vars-it-depends-on} - - # compute variable_flow and an initial read_vars - for block in self.annotated: - # figure out which variables are ever read - for op in block.operations: - if op.opname not in CanRemove: # mark the inputs as really needed - for arg in op.args: - read_vars[arg] = True - else: - # if CanRemove, only mark dependencies of the result - # on the input variables - deps = variable_flow.setdefault(op.result, []) - deps.extend(op.args) - - if isinstance(block.exitswitch, Variable): - read_vars[block.exitswitch] = True - - if block.exits: - for link in block.exits: - if link.target not in self.annotated: - for arg, targetarg in zip(link.args, link.target.inputargs): - read_vars[arg] = True - read_vars[targetarg] = True - else: - for arg, targetarg in zip(link.args, link.target.inputargs): - deps = variable_flow.setdefault(targetarg, []) - deps.append(arg) - else: - # 
return and except blocks implicitely use their input variable(s) - for arg in block.inputargs: - read_vars[arg] = True - # an input block's inputargs should not be modified, even if some - # of the function's input arguments are not actually used - if block.isstartblock: - for arg in block.inputargs: - read_vars[arg] = True - - # flow read_vars backwards so that any variable on which a read_vars - # depends is also included in read_vars - pending = list(read_vars) - for var in pending: - for prevvar in variable_flow.get(var, []): - if prevvar not in read_vars: - read_vars[prevvar] = True - pending.append(prevvar) - - for block in self.annotated: - - # look for removable operations whose result is never used - for i in range(len(block.operations)-1, -1, -1): - op = block.operations[i] - if op.result not in read_vars: - if op.opname in CanRemove: - del block.operations[i] - elif op.opname == 'simple_call': - # XXX we want to have a more effective and safe - # way to check if this operation has side effects - # ... - if op.args and isinstance(op.args[0], Constant): - func = op.args[0].value - if func is isinstance: - del block.operations[i] - - # look for output variables never used - # warning: this must be completely done *before* we attempt to - # remove the corresponding variables from block.inputargs! - # Otherwise the link.args get out of sync with the - # link.target.inputargs. - for link in block.exits: - assert len(link.args) == len(link.target.inputargs) - for i in range(len(link.args)-1, -1, -1): - if link.target.inputargs[i] not in read_vars: - del link.args[i] - # the above assert would fail here - - for block in self.annotated: - # look for input variables never used - # The corresponding link.args have already been all removed above - for i in range(len(block.inputargs)-1, -1, -1): - if block.inputargs[i] not in read_vars: - del block.inputargs[i] - -# expands the += operation between lists into a basic block loop. 
-# a = inplace_add(b, c) -# becomes the following graph: -# -# clen = len(c) -# growlist(b, clen) # ensure there is enough space for clen new items -# | -# | (pass all variables to next block, plus i=0) -# V -# ,--> z = lt(i, clen) -# | exitswitch(z): -# | | | False -# | | True `------------------> ...sequel... -# | V -# | x = getitem(c, i) -# | fastappend(b, x) -# | i1 = add(i, 1) -# | | -# `-----' (pass all variables, with i=i1) -# -##def transform_listextend(self): -## allblocks = list(self.annotated) -## for block in allblocks: -## for j in range(len(block.operations)): -## op = block.operations[j] -## if op.opname != 'inplace_add': -## continue -## a = op.result -## b, c = op.args -## s_list = self.bindings.get(b) -## if not isinstance(s_list, annmodel.SomeList): -## continue - -## # new variables -## clen = Variable() -## i = Variable() -## i1 = Variable() -## z = Variable() -## x = Variable() -## dummy = Variable() -## self.setbinding(clen, annmodel.SomeInteger(nonneg=True)) -## self.setbinding(i, annmodel.SomeInteger(nonneg=True)) -## self.setbinding(i1, annmodel.SomeInteger(nonneg=True)) -## self.setbinding(z, annmodel.SomeBool()) -## self.setbinding(x, s_list.s_item) -## self.setbinding(dummy, annmodel.SomeImpossibleValue()) - -## sequel_operations = block.operations[j+1:] -## sequel_exitswitch = block.exitswitch -## sequel_exits = block.exits - -## del block.operations[j:] -## block.operations += [ -## SpaceOperation('len', [c], clen), -## SpaceOperation('growlist', [b, clen], dummy), -## ] -## block.exitswitch = None -## allvars = block.getvariables() - -## condition_block = Block(allvars+[i]) -## condition_block.operations += [ -## SpaceOperation('lt', [i, clen], z), -## ] -## condition_block.exitswitch = z - -## loopbody_block = Block(allvars+[i]) -## loopbody_block.operations += [ -## SpaceOperation('getitem', [c, i], x), -## SpaceOperation('fastappend', [b, x], dummy), -## SpaceOperation('add', [i, Constant(1)], i1), -## ] - -## sequel_block = 
Block(allvars+[a]) -## sequel_block.operations = sequel_operations -## sequel_block.exitswitch = sequel_exitswitch - -## # link the blocks together -## block.recloseblock( -## Link(allvars+[Constant(0)], condition_block), -## ) -## condition_block.closeblock( -## Link(allvars+[i], loopbody_block, exitcase=True), -## Link(allvars+[b], sequel_block, exitcase=False), -## ) -## loopbody_block.closeblock( -## Link(allvars+[i1], condition_block), -## ) -## sequel_block.closeblock(*sequel_exits) - -## # now rename the variables -- so far all blocks use the -## # same variables, which is forbidden -## renamevariables(self, condition_block) -## renamevariables(self, loopbody_block) -## renamevariables(self, sequel_block) - -## allblocks.append(sequel_block) -## break - -##def renamevariables(self, block): -## """Utility to rename the variables in a block to fresh variables. -## The annotations are carried over from the old to the new vars.""" -## varmap = {} -## block.inputargs = [varmap.setdefault(a, Variable()) -## for a in block.inputargs] -## operations = [] -## for op in block.operations: -## result = varmap.setdefault(op.result, Variable()) -## args = [varmap.get(a, a) for a in op.args] -## op = SpaceOperation(op.opname, args, result) -## operations.append(op) -## block.operations = operations -## block.exitswitch = varmap.get(block.exitswitch, block.exitswitch) -## exits = [] -## for exit in block.exits: -## args = [varmap.get(a, a) for a in exit.args] -## exits.append(Link(args, exit.target, exit.exitcase)) -## block.recloseblock(*exits) -## # carry over the annotations -## for a1, a2 in varmap.items(): -## if a1 in self.bindings: -## self.setbinding(a2, self.bindings[a1]) -## self.annotated[block] = True - -def transform_dead_code(self): - """Remove dead code: these are the blocks that are not annotated at all - because the annotation considered that no conditional jump could reach - them.""" - for block in fully_annotated_blocks(self): - for link in block.exits: - 
if link not in self.links_followed: - lst = list(block.exits) - lst.remove(link) - block.exits = tuple(lst) - if len(block.exits) == 1: - block.exitswitch = None - block.exits[0].exitcase = None - - -def transform_graph(ann): - """Apply set of transformations available.""" - # WARNING: this produces incorrect results if the graph has been - # modified by t.simplify() after is had been annotated. - if ann.translator: - ann.translator.checkgraphs() - transform_dead_code(ann) - transform_allocate(ann) - transform_slice(ann) - ##transform_listextend(ann) - # do this last, after the previous transformations had a - # chance to remove dependency on certain variables - transform_dead_op_vars(ann) - if ann.translator: - ann.translator.checkgraphs() diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py deleted file mode 100644 index e2d8243181..0000000000 --- a/pypy/translator/translator.py +++ /dev/null @@ -1,267 +0,0 @@ -"""PyPy Translator Frontend - -Glue script putting together the various pieces of the translator. -Can be used for interactive testing of the translator. Run as: - - python -i translator.py - -Example: - - t = Translator(func) - t.view() # control flow graph - - print t.source() # original source - print t.pyrex() # pyrex translation - print t.cl() # common lisp translation - - t.simplify() # flow graph simplification - a = t.annotate([int]) # pass the list of args types - a.simplify() # simplification by annotator - t.view() # graph + annotations under the mouse - - t.call(arg) # call original function - t.dis() # bytecode disassemble - - f = t.compile() # pyrex compilation - assert f(arg) == t.call(arg) # sanity check - -Some functions are provided for the benefit of interactive testing. -Try dir(test) for list of current snippets. 
-""" - -import autopath - -from pypy.objspace.flow.model import * -from pypy.annotation.model import * -from pypy.translator.annrpython import RPythonAnnotator -from pypy.translator.simplify import simplify_graph -from pypy.translator.genpyrex import GenPyrex -from pypy.translator.gencl import GenCL -from pypy.translator.genc import GenC, uniquemodulename -from pypy.translator.tool.buildpyxmodule import make_module_from_pyxstring -from pypy.translator.tool.buildpyxmodule import make_module_from_c -from pypy.objspace.flow import FlowObjSpace - - -class Translator: - - def __init__(self, func, verbose=False, simplifying=False): - self.entrypoint = func - self.verbose = verbose - self.simplifying = simplifying - self.clear() - - def clear(self): - """Clear all annotations and all flow graphs.""" - self.annotator = None - self.flowgraphs = {} # {function: graph} - self.functions = [] # the keys of self.flowgraphs, in creation order - self.callgraph = {} # {opaque_tag: (caller, callee)} - self.frozen = False # when frozen, no more flowgraphs can be generated - self.getflowgraph() - - def getflowgraph(self, func=None, called_by=None, call_tag=None): - """Get the flow graph for a function (default: the entry point).""" - func = func or self.entrypoint - try: - graph = self.flowgraphs[func] - except KeyError: - if self.verbose: - print 'getflowgraph (%s:%d) %s' % ( - func.func_globals.get('__name__', '?'), - func.func_code.co_firstlineno, - func.__name__) - assert not self.frozen - space = FlowObjSpace() - graph = space.build_flow(func) - if self.simplifying: - simplify_graph(graph) - self.flowgraphs[func] = graph - self.functions.append(func) - try: - import inspect - graph.func = func - graph.source = inspect.getsource(func) - except IOError: - pass # e.g. 
when func is defined interactively - if called_by: - self.callgraph[called_by, func, call_tag] = called_by, func - return graph - - def gv(self, func=None): - """Shows the control flow graph for a function (default: all) - -- requires 'dot' and 'gv'.""" - import os - from pypy.translator.tool.make_dot import make_dot, make_dot_graphs - if func is None: - # show the graph of *all* functions at the same time - graphs = [] - for func in self.functions: - graph = self.getflowgraph(func) - graphs.append((graph.name, graph)) - dest = make_dot_graphs(self.entrypoint.__name__, graphs) - else: - graph = self.getflowgraph(func) - dest = make_dot(graph.name, graph) - os.system('gv %s' % str(dest)) - - def view(self, *functions): - """Shows the control flow graph with annotations if computed. - Requires 'dot' and pygame.""" - from pypy.translator.tool.graphpage import FlowGraphPage - FlowGraphPage(self).display() - - def simplify(self, func=None): - """Simplifies the control flow graph (default: for all functions).""" - if func is None: - for func in self.flowgraphs.keys(): - self.simplify(func) - else: - graph = self.getflowgraph(func) - simplify_graph(graph) - - def annotate(self, input_args_types, func=None): - """annotate(self, input_arg_types[, func]) -> Annotator - - Provides type information of arguments. Returns annotator. - """ - func = func or self.entrypoint - if self.annotator is None: - self.annotator = RPythonAnnotator(self) - graph = self.getflowgraph(func) - self.annotator.build_types(graph, input_args_types, func) - return self.annotator - - def checkgraphs(self): - for graph in self.flowgraphs.itervalues(): - checkgraph(graph) - - def source(self, func=None): - """Returns original Python source. - - Returns <interactive> for functions written while the - interactive session. 
- """ - func = func or self.entrypoint - graph = self.getflowgraph(func) - return getattr(graph, 'source', '<interactive>') - - def pyrex(self, input_arg_types=None, func=None): - """pyrex(self[, input_arg_types][, func]) -> Pyrex translation - - Returns Pyrex translation. If input_arg_types is provided, - returns type annotated translation. Subsequent calls are - not affected by this. - """ - return self.generatecode(GenPyrex, input_arg_types, func) - - def cl(self, input_arg_types=None, func=None): - """cl(self[, input_arg_types][, func]) -> Common Lisp translation - - Returns Common Lisp translation. If input_arg_types is provided, - returns type annotated translation. Subsequent calls are - not affected by this. - """ - return self.generatecode(GenCL, input_arg_types, func) - - def c(self): - """c(self) -> C (CPython) translation - - Returns C (CPython) translation. - """ - from StringIO import StringIO - out = StringIO() - genc = GenC(out, self) - return out.getvalue() - - def generatecode(self, gencls, input_arg_types, func): - if input_arg_types is None: - ann = self.annotator - else: - ann = RPythonAnnotator(self) - if func is None: - codes = [self.generatecode1(gencls, input_arg_types, - self.entrypoint, ann)] - for func in self.functions: - if func is not self.entrypoint: - code = self.generatecode1(gencls, None, func, ann, - public=False) - codes.append(code) - else: - codes = [self.generatecode1(gencls, input_arg_types, func, ann)] - code = self.generateglobaldecl(gencls, func, ann) - if code: - codes.insert(0, code) - return '\n\n#_________________\n\n'.join(codes) - - def generatecode1(self, gencls, input_arg_types, func, ann, public=True): - graph = self.getflowgraph(func) - g = gencls(graph) - g.by_the_way_the_function_was = func # XXX - if input_arg_types is not None: - ann.build_types(graph, input_arg_types, func) - if ann is not None: - g.setannotator(ann) - return g.emitcode(public) - - def generateglobaldecl(self, gencls, func, ann): - graph = 
self.getflowgraph(func) - g = gencls(graph) - if ann is not None: - g.setannotator(ann) - return g.globaldeclarations() - - def compile(self): - """Returns compiled function, compiled using Pyrex. - """ - from pypy.tool.udir import udir - name = self.entrypoint.func_name - pyxcode = self.pyrex() - mod = make_module_from_pyxstring(name, udir, pyxcode) - return getattr(mod, name) - - def ccompile(self, really_compile=True): - """Returns compiled function, compiled using the C generator. - """ - from pypy.tool.udir import udir - name = uniquemodulename(self.entrypoint.func_name) - cfile = udir.join('%s.c' % name) - f = cfile.open('w') - f2 = udir.join('%s-init.h' % name).open('w') - GenC(f, self, name, f2=f2, f2name='%s-init.h' % name) - f2.close() - f.close() - if not really_compile: - return cfile - mod = make_module_from_c(cfile, - include_dirs=[autopath.this_dir]) - return getattr(mod, self.entrypoint.func_name) - - def call(self, *args): - """Calls underlying Python function.""" - return self.entrypoint(*args) - - def dis(self, func=None): - """Disassembles underlying Python function to bytecodes.""" - from dis import dis - dis(func or self.entrypoint) - -## def consider_call(self, ann, func, args): -## graph = self.getflowgraph(func) -## ann.addpendingblock(graph.startblock, args) -## result_var = graph.getreturnvar() -## try: -## return ann.binding(result_var) -## except KeyError: -## # typical case for the 1st call, because addpendingblock() did -## # not actually start the analysis of the called function yet. -## return impossiblevalue - - -if __name__ == '__main__': - from pypy.translator.test import snippet as test - print __doc__ - - # 2.3 specific -- sanxiyn - import os - os.putenv("PYTHONINSPECT", "1") |