diff --git a/robotpy_build/autowrap/cxxparser.py b/robotpy_build/autowrap/cxxparser.py index 4dd57fea..b0a2208e 100644 --- a/robotpy_build/autowrap/cxxparser.py +++ b/robotpy_build/autowrap/cxxparser.py @@ -1,5 +1,7 @@ -# parses a header file and outputs a HeaderContext suitable for use with -# the autowrap templates +# +# Uses cxxheaderparser to parse a header file and outputs a HeaderContext +# suitable for use with the autowrap templates +# from keyword import iskeyword import pathlib @@ -20,14 +22,13 @@ BufferData, BufferType, ClassData, - EnumData, EnumValue, FunctionData, ParamData, PropAccess, ReturnValuePolicy, ) -from ..generator_data import GeneratorData +from ..generator_data import GeneratorData, OverloadTracker from .j2_context import ( BaseClassData, @@ -39,6 +40,7 @@ FunctionContext, GeneratedLambda, HeaderContext, + ParamCategory, ParamContext, PropContext, TemplateInstanceContext, @@ -46,17 +48,30 @@ ) from .mangle import trampoline_signature +from cxxheaderparser.tokfmt import tokfmt from cxxheaderparser.types import ( + AnonymousName, + Array, ClassDecl, + DecoratedType, EnumDecl, Field, ForwardDecl, + FunctionType, FriendDecl, + FundamentalSpecifier, Function, Method, + MoveReference, NamespaceAlias, NameSpecifier, + Parameter, + Pointer, + PQName, + PQNameSegment, + Reference, TemplateInst, + Type, Typedef, UsingAlias, UsingDecl, @@ -76,7 +91,7 @@ class HasSubpackage(Protocol): - subpackage: str + subpackage: typing.Optional[str] class HasDoc(Protocol): @@ -131,6 +146,100 @@ def _gen_int_types(): _default_enum_value = EnumValue() +def _is_fundamental(n: PQNameSegment): + return isinstance(n, FundamentalSpecifier) or ( + isinstance(n, NameSpecifier) and n.name in _int32_types + ) + + +def _is_prop_readonly(t: DecoratedType) -> bool: + while True: + if isinstance(t, Array): + return True + elif isinstance(t, (FunctionType, MoveReference, Reference)): + return False + elif isinstance(t, Type): + return not _is_fundamental(t.typename.segments[-1]) 
+ elif isinstance(t, Pointer): + t = t.ptr_to + else: + assert False + + +# def _unwrap(t: DecoratedType) -> typing.Union[FunctionType, Type]: +# # TODO: move this to cxxheaderparser, probably rename ptr_to/ref_to to 'to' +# while True: +# if isinstance(t, (FunctionType, Type)): +# return t +# elif isinstance(t, Pointer): +# t = t.ptr_to +# elif isinstance(t, Reference): +# t = t.ref_to +# elif isinstance(t, MoveReference): +# t = t.moveref_to +# elif isinstance(t, Array): +# t = t.array_of +# else: +# assert False + + +def _count_and_unwrap( + t: DecoratedType, +) -> typing.Tuple[typing.Union[Array, FunctionType, Type], int, int]: + # TODO: const and volatile are a problem here + ptrs = 0 + refs = 0 + while True: + if isinstance(t, (Array, FunctionType, Type)): + return t, ptrs, refs + elif isinstance(t, Pointer): + ptrs += 1 + t = t.ptr_to + elif isinstance(t, Reference): + refs += 1 + t = t.ref_to + elif isinstance(t, MoveReference): + refs += 2 + t = t.moveref_to + else: + assert False + + +def _fmt_base_name(typename: PQName) -> typing.Tuple[str, str, typing.List[str]]: + # returns all names, with specialization for everything except the last + # returns all names + specialization + # returns list of all specialization parameters + + assert False + + +def _fmt_type(type: DecoratedType) -> str: + # include all specializations + + # Used in function signature.. 
+ # param_sig = ", ".join( + # p.get("enum", p["raw_type"]) + "&" * p["reference"] + "*" * p["pointer"] + # for p in fn["parameters"] + # ) + + assert False + + +def _fmt_nameonly(typename: PQName) -> typing.Optional[str]: + parts = [] + for segment in typename.segments: + if not isinstance(segment, NameSpecifier): + return None + parts.append(segment.name) + return "::".join(parts) + + +# def _fmt_specialization(spec: TemplateSpecialization) -> str: +# pass + +# def _make_name(typename: PQName): + + @dataclass class _ReturnParamContext: #: was x_type @@ -154,6 +263,25 @@ class ClassStateData(typing.NamedTuple): cls_key: str data: ClassData + typealias_names = typing.Set[str] + + # have to defer processing these + defer_protected_methods: typing.List[Method] + defer_private_virtual_methods: typing.List[Method] + defer_protected_fields: typing.List[Field] + + # Needed for trampoline + cls_cpp_identifier: str + template_argument_list: str + base_template_params: typing.Optional[typing.List[str]] + base_template_args: typing.Optional[typing.List[str]] + + # See j2_context::TrampolineData for these + methods_to_disable: typing.List[FunctionContext] + virtual_methods: typing.List[FunctionContext] + protected_constructors: typing.List[FunctionContext] + non_virtual_protected_methods: typing.List[FunctionContext] + Context = typing.Union[str, ClassStateData] @@ -173,14 +301,21 @@ class AutowrapVisitor: """ types: typing.Set[str] + user_types: typing.Set[str] def __init__( - self, hctx: HeaderContext, gendata: GeneratorData, report_only: bool + self, + hctx: HeaderContext, + gendata: GeneratorData, + casters: typing.Dict[str, typing.Dict[str, typing.Any]], + report_only: bool, ) -> None: self.gendata = gendata self.hctx = hctx self.report_only = report_only + self.casters = casters self.types = set() + self.user_types = set() # # Visitor interface @@ -237,9 +372,28 @@ def on_variable(self, state: AWState, v: Variable) -> None: pass def on_function(self, state: AWState, fn: 
Function) -> None: - pass # TODO + # operators that aren't class members aren't rendered + if fn.operator: + return + + # ignore functions with complicated names + fn_name = self._get_fn_name(fn) + if not fn_name: + return + + data, overload_tracker = self.gendata.get_function_data(fn_name, fn) + if data.ignore: + return + + scope_var = self._get_module_var(data) + fctx = self._on_fn_or_method( + fn, data, fn_name, scope_var, False, overload_tracker + ) + fctx.namespace = state.user_data + self.hctx.functions.append(fctx) def on_method_impl(self, state: AWState, method: Method) -> None: + # we only wrap methods when defined in a class pass def on_typedef(self, state: AWState, typedef: Typedef) -> None: @@ -249,19 +403,26 @@ def on_using_namespace(self, state: AWState, namespace: typing.List[str]) -> Non pass def on_using_alias(self, state: AWState, using: UsingAlias) -> None: - pass - # if using.access == "public" - # self._add_type_caster(u["raw_type"]) + self._add_type_caster(using.type) + + # autodetect embedded using directives, but don't override anything + # the user specifies + # - these are in block scope, so they cannot include templates + if ( + using.access == "public" + and using.template is None + and using.alias not in state.user_data.typealias_names + ): + ctx = state.user_data.ctx + ctx.auto_typealias.append( + f"using {using.alias} [[maybe_unused]] = typename {ctx.full_cpp_name}::{using.alias}" + ) def on_using_declaration(self, state: AWState, using: UsingDecl) -> None: + self._add_type_caster_pqname(using.typename) + if using.access is None: self.hctx.using_declarations.append(using.typename) - else: - # TODO in a class - pass - - # for _, u in cls["using"].items(): - # self._add_type_caster(u["raw_type"]) # # Enums @@ -269,9 +430,20 @@ def on_using_declaration(self, state: AWState, using: UsingDecl) -> None: def on_enum(self, state: AWState, enum: EnumDecl) -> None: - # If the name has no components, its unnamed - # If it has more than one 
component, they are part of its full name - ename = "::".join(map(str, enum.typename.segments)) + # If an enum name has more than one component, that's weird and we're + # not going to support it for now. Who forward declares enums anyways? + name_segs = enum.typename.segments + assert len(name_segs) > 0 + + if len(name_segs) > 1: + return + elif isinstance(name_segs[0], NameSpecifier): + ename = name_segs[0].name + elif isinstance(name_segs[0], AnonymousName): + ename = "" + else: + # something else weird we can't support for now + return user_data = state.user_data @@ -282,22 +454,15 @@ def on_enum(self, state: AWState, enum: EnumDecl) -> None: if enum_data.ignore: return - scope_var = self._get_module_var(enum_data) - ctxlist = self.hctx.enums var_name = f"enum{len(ctxlist)}" - value_scope = namespace - - # self.hctx.enums.append( - # self._enum_hook(en["namespace"], scope_var, var_name, en, enum_data) - # ) + enum_scope = user_data + scope_var = self._get_module_var(enum_data) else: - # per-class + # per-class -- ignore private/protected enums if enum.access != "public": return - # cls_key needed for prop data - # ... 
I think this is just the full cpp name without the namespace enum_data = self.gendata.get_cls_enum_data( ename, user_data.cls_key, user_data.data ) @@ -306,8 +471,6 @@ def on_enum(self, state: AWState, enum: EnumDecl) -> None: cls_ctx = user_data.ctx - scope = f"{cls_ctx.full_cpp_name}::" - if ename: ctxlist = cls_ctx.enums var_name = f"{cls_ctx.var_name}_enum{len(ctxlist)}" @@ -315,18 +478,19 @@ def on_enum(self, state: AWState, enum: EnumDecl) -> None: ctxlist = cls_ctx.unnamed_enums var_name = f"{cls_ctx.var_name}_enum_u{len(ctxlist)}" + enum_scope = cls_ctx.full_cpp_name + scope_var = cls_ctx.var_name + value_prefix = None strip_prefixes = [] values: typing.List[EnumeratorContext] = [] - # TODO: THIS ISN'T RIGHT YET, NEED VALUE_SCOPE INIT PROPERLY py_name = "" full_cpp_name = "" - value_scope = scope + value_scope = f"{enum_scope}::{ename}" if ename: - full_cpp_name = f"{scope}{ename}" - value_scope = f"{full_cpp_name}::" + full_cpp_name = value_scope py_name = self._make_py_name(ename, enum_data) value_prefix = enum_data.value_prefix @@ -377,9 +541,7 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: ): return False - cls_key, cls_name, cls_namespace, parent_ctx = self._get_cls_key_name_and_ns( - state - ) + cls_key, cls_name, cls_namespace, parent_ctx = self._process_class_name(state) class_data = self.gendata.get_class_data(cls_key) # Ignore explicitly ignored classes @@ -387,8 +549,9 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: return False for typename in class_data.force_type_casters: - self._add_type_caster(typename) + self._add_user_type_caster(typename) + class_decl = state.class_decl var_name = f"cls_{cls_name}" # No template stuff @@ -408,65 +571,9 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: # Process inheritance # - pybase_params = set() - bases: typing.List[BaseClassData] = [] - ignored_bases = {ib: True for ib in class_data.ignored_bases} - - # 
ignored_bases includes specializations - - for base in state.class_decl.bases: - if ignored_bases.pop(base["class"], None) or base["access"] == "private": - continue - - bqual = class_data.base_qualnames.get(base["decl_name"]) - if bqual: - full_cpp_name_w_templates = bqual - # TODO: sometimes need to add this to pybase_params, but - # that would require parsing this more. Seems sufficiently - # obscure, going to omit it for now. - tp = bqual.find("<") - if tp == -1: - base_full_cpp_name = bqual - template_params = "" - else: - base_full_cpp_name = bqual[:tp] - template_params = bqual[tp + 1 : -1] - else: - if "::" not in base["decl_name"]: - base_full_cpp_name = f'{cls_namespace}::{base["decl_name"]}' - else: - base_full_cpp_name = base["decl_name"] - - base_decl_params = base.get("decl_params") - if base_decl_params: - template_params = self._make_base_params( - base_decl_params, pybase_params - ) - full_cpp_name_w_templates = ( - f"{base_full_cpp_name}<{template_params}>" - ) - else: - template_params = "" - full_cpp_name_w_templates = base_full_cpp_name - - base_identifier = base_full_cpp_name.translate(_qualname_trans) - - bases.append( - BaseClassData( - full_cpp_name=base_full_cpp_name, - full_cpp_name_w_templates=full_cpp_name_w_templates, - full_cpp_name_identifier=base_identifier, - template_params=template_params, - ) - ) - - if not self.report_only and ignored_bases: - bases = ", ".join(str(base.typename) for base in state.class_decl.bases) - invalid_bases = ", ".join(ignored_bases.keys()) - raise ValueError( - f"{cls_name}: ignored_bases contains non-existant bases " - + f"{invalid_bases}; valid bases are {bases}" - ) + bases, pybase_params = self._process_class_bases( + cls_namespace, cls_name, class_decl, class_data + ) self.hctx.class_hierarchy[simple_cls_qualname] = [ base.full_cpp_name for base in bases @@ -495,6 +602,9 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: base_template_args = [] base_template_params = [] 
+ # TODO: should be able to remove this parsing since cxxheaderparser + # can figure it out for us + for param in class_data.template_params: if " " in param: arg = param.split(" ", 1)[1] @@ -523,7 +633,7 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: base_template_args = None if not self.report_only: - if "template" in cls: + if class_decl.template: if template_parameter_list == "": raise ValueError( f"{cls_name}: must specify template_params for templated class, or ignore it" @@ -534,6 +644,16 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: f"{cls_name}: cannot specify template_params for non-template class" ) + # + # Other stuff + # + + if class_data.is_polymorphic is not None: + is_polymorphic = class_data.is_polymorphic + else: + # bad assumption? probably + is_polymorphic = len(class_decl.bases) + doc = self._process_doc(class_decl.doxygen, class_data) py_name = self._make_py_name(cls_name, class_data) @@ -542,26 +662,61 @@ def on_class_start(self, state: AWClassBlockState) -> typing.Optional[bool]: name = constant.split("::")[-1] constants.append((name, constant)) + # Also need typealias_names as user_typealais + # do logic for extracting user defined typealiases here + # - these are at class scope, so they can include template + typealias_names: typing.Set[str] = set() + user_typealias: typing.List[str] = [] + self._extract_typealias(class_data.typealias, user_typealias, typealias_names) + ctx = ClassContext( - parent=parent, + parent=parent_ctx, namespace=cls_namespace, - cpp_name=cpp_name, - full_cpp_name=full_cpp_name, + cpp_name=cls_name, + full_cpp_name=cls_qualname, py_name=py_name, scope_var=scope_var, var_name=var_name, nodelete=class_data.nodelete, - final=state.class_decl.final, + final=class_decl.final, doc=doc, bases=bases, template=template_data, user_typealias=user_typealias, constants=constants, - inline_code=class_data.inline_code, + inline_code=class_data.inline_code or "", + 
force_multiple_inheritance=class_data.force_multiple_inheritance, + is_polymorphic=is_polymorphic, + ) + + # Add to parent class or global class list + if parent_ctx: + parent_ctx.child_classes.append(ctx) + else: + self.hctx.classes.append(ctx) + + # Store for other events to use + state.user_data = ClassStateData( + ctx=ctx, + cls_key=cls_key, + data=class_data, + typealias_names=typealias_names, + # Method data + defer_protected_methods=[], + defer_private_virtual_methods=[], + defer_protected_fields=[], + # Trampoline data + cls_cpp_identifier=cls_cpp_identifier, + template_argument_list=template_argument_list, + base_template_args=base_template_args, + base_template_params=base_template_params, + methods_to_disable=[], + virtual_methods=[], + protected_constructors=[], + non_virtual_protected_methods=[], ) - state.user_data = ClassStateData(ctx=ctx, cls_key=cls_key, data=class_data) - def _get_cls_key_name_and_ns( + def _process_class_name( self, state: AWClassBlockState ) -> typing.Tuple[str, str, str, typing.Optional[ClassContext]]: class_decl = state.class_decl @@ -597,31 +752,856 @@ def _get_cls_key_name_and_ns( return cls_key, cls_name, cls_namespace, parent_ctx - def _on_class_templates(self): - pass + def _process_class_bases( + self, + cls_namespace: str, + cls_name: str, + class_decl: ClassDecl, + class_data: ClassData, + ) -> typing.Tuple[typing.List[BaseClassData], typing.Set[str],]: + bases: typing.List[BaseClassData] = [] + pybase_params: typing.Set[str] = set() + ignored_bases = {ib: True for ib in class_data.ignored_bases} + + for base in class_decl.bases: + if base.access == "private": + continue + + cpp_name, cpp_name_w_templates, tparam_list = _fmt_base_name(base.typename) + if ignored_bases.pop(cpp_name_w_templates, None): + continue + + # Sometimes, we can't guess all the information about the base, so the + # user needs to specify it explicitly. 
+ user_bqual = class_data.base_qualnames.get(cpp_name_w_templates) + if user_bqual: + cpp_name_w_templates = user_bqual + # TODO: sometimes need to add this to pybase_params, but + # that would require parsing this more. Seems sufficiently + # obscure, going to omit it for now. + tp = user_bqual.find("<") + if tp == -1: + cpp_name = user_bqual + template_params = "" + else: + cpp_name = user_bqual[:tp] + template_params = user_bqual[tp + 1 : -1] + else: + for param in tparam_list: + pybase_params.add(param) + + template_params = ", ".join(tparam_list) + + # If no explicit namespace specified, we assume base classes + # live in the same namespace as the class + if len(base.typename.segments) == 1: + cpp_name = f"{cls_namespace}::{cpp_name}" + cpp_name_w_templates = f"{cls_namespace}::{cpp_name_w_templates}" + + base_identifier = cpp_name.translate(_qualname_trans) + + bases.append( + BaseClassData( + full_cpp_name=cpp_name, + full_cpp_name_w_templates=cpp_name_w_templates, + full_cpp_name_identifier=base_identifier, + template_params=template_params, + ) + ) + + if not self.report_only and ignored_bases: + bases = ", ".join(str(base.typename) for base in class_decl.bases) + invalid_bases = ", ".join(ignored_bases.keys()) + raise ValueError( + f"{cls_name}: ignored_bases contains non-existant bases " + + f"{invalid_bases}; valid bases are {bases}" + ) + + return bases, pybase_params def on_class_field(self, state: AWClassBlockState, f: Field) -> None: - # cannot bind without a trampoline - but we can't know if we need one - # until it's done, so defer it? 
- pass + # Ignore unnamed fields + if not f.name: + return + + access = f.access + if access == "public": + self._on_class_field(state, f, state.user_data.ctx.public_properties) + elif access == "protected": + state.user_data.defer_protected_fields.append(f) + + def _on_class_field( + self, state: AWClassBlockState, f: Field, props: typing.List[PropContext] + ) -> None: + prop_name = f.name + propdata = self.gendata.get_cls_prop_data( + prop_name, state.user_data.cls_key, state.user_data.data + ) + if propdata.ignore: + return + self._add_type_caster(f.type) + if propdata.rename: + py_name = propdata.rename + elif f.access != "public": + py_name = f"_{prop_name}" + elif iskeyword(prop_name): + py_name = f"{prop_name}_" + else: + py_name = prop_name + + if propdata.access == PropAccess.AUTOMATIC: + # const variables can't be written + if f.constexpr or getattr(f.type, "const", False): + prop_readonly = True + # We assume that a struct intentionally has readwrite data + # attributes regardless of type + elif state.class_decl.classkey != "class": + prop_readonly = False + else: + prop_readonly = _is_prop_readonly(f.type) + else: + prop_readonly = propdata.access == PropAccess.READONLY + + doc = self._process_doc(f.doxygen, propdata) + + array_size = None + is_array = isinstance(f.type, Array) + if is_array: + array_size = str(f.type.size) + + props.append( + PropContext( + py_name=py_name, + cpp_name=prop_name, + cpp_type=_fmt_type(f.type), + readonly=prop_readonly, + doc=doc, + array_size=array_size, + array=is_array, + reference=f.type.ref_to, + static=f.static, + ) + ) def on_class_method(self, state: AWClassBlockState, method: Method) -> None: - pass + + # This needs to only process enough about the method to answer things + # that are needed in on_class_end. 
Some methods are only processed in + # on_class_end if the answers are right + cdata = state.user_data + cctx = cdata.ctx + + if method.constructor: + cctx.has_constructor = True + is_polymorphic = method.virtual or method.override or method.final + if is_polymorphic: + cctx.is_polymorphic = True + + access = state.access + if access == "public": + # Go ahead and process public methods now + self._on_class_method(state, method) + elif access == "protected": + cdata.defer_protected_methods.append(method) + elif access == "private" and is_polymorphic: + cdata.defer_private_virtual_methods.append(method) + + def _on_class_method( + self, + state: AWClassBlockState, + method: Method, + methods: typing.List[FunctionContext], + ) -> None: + + cdata = state.user_data + cctx = cdata.ctx + + # I think this is always true? + assert len(method.name.segments) == 1 + + method_name = self._get_fn_name(method) + if not method_name: + return + + is_constructor = method.constructor + is_override = method.override + is_virtual = method.virtual or is_override + + operator = method.operator + + # Ignore some operators, deleted methods, destructors + if ( + (operator and operator not in _operators) + or method.destructor + or method.deleted + ): + return + + # Also ignore move constructors and copy constructors + if ( + is_constructor + and len(method.parameters) == 1 + and self._is_copy_move_constructor(cctx, method.parameters[0].type) + ): + return + + is_final = method.final + is_private = state.access == "private" + + method_data, overload_tracker = self.gendata.get_function_data( + method_name, + method, + cdata.cls_key, + cdata.data, + state.access == "private", + ) + if method_data.ignore: + return + + fctx = self._on_fn_or_method( + method, + method_data, + method_name, + cdata.ctx.scope_var, + state.access != "public", + overload_tracker, + ) + + # Update class-specific method attributes + if operator: + fctx.operator = operator + self.hctx.need_operators_h = True + if 
method_data.no_release_gil is None: + fctx.release_gil = False + + if method.static: + fctx.is_static_method = True + if method.pure_virtual: + fctx.is_pure_virtual = True + + # Update method lists + if is_private and is_override: + cdata.methods_to_disable.append(fctx) + else: + if is_final: + cdata.methods_to_disable.append(fctx) + + # disable virtual method generation for functions with buffer + # parameters (doing it correctly is hard, so we skip it) + if is_virtual and not fctx.has_buffers: + cdata.virtual_methods.append(fctx) + + if not is_private: + if not fctx.ignore_py: + methods.append(fctx) + + if state.access == "protected": + if is_constructor: + cdata.protected_constructors.append(fctx) + elif not is_virtual: + cdata.non_virtual_protected_methods.append(fctx) + + # If the method has cpp_code defined, it must either match the function + # signature of the method, or virtual_xform must be defined with an + # appropriate conversion. If neither of these are true, it will lead + # to difficult to diagnose errors at runtime. 
We add a static assert + # to try and catch these errors at compile time + need_vcheck = ( + is_virtual + and method_data.cpp_code + and not method_data.virtual_xform + and not method_data.trampoline_cpp_code + and not state.class_decl.final + and not cdata.data.force_no_trampoline + ) + if need_vcheck: + cctx.vcheck_fns.append(fctx) + self.hctx.has_vcheck = True + + # automatically retain references passed to constructors if the + # user didn't specify their own keepalive + # if not method_data.keepalive: + # if is_constructor and p_reference == 1: + # fn.keepalives.append((1, i + 2)) + + # Check for user data errors + if not self.report_only: + + if method_data.ignore_pure and not method.pure_virtual: + raise ValueError( + f"{cdata.cls_key}::{method_name}: cannot specify ignore_pure for function that isn't pure" + ) + + if method_data.trampoline_cpp_code and not is_virtual: + raise ValueError( + f"{cdata.cls_key}::{method_name}: cannot specify trampoline_cpp_code for a non-virtual method" + ) + + if method_data.virtual_xform and not is_virtual: + raise ValueError( + f"{cdata.cls_key}::{method_name}: cannot specify virtual_xform for a non-virtual method" + ) + + # pybind11 doesn't support this, user must fix it + if ( + method.ref_qualifier == "&&" + and not method_data.ignore_py + and not method_data.cpp_code + ): + raise ValueError( + f"{cdata.cls_key}::{method_name}: has && ref-qualifier which cannot be directly bound by pybind11, must specify cpp_code or ignore_py" + ) + + def _is_copy_move_constructor( + self, cctx: ClassContext, first_type_param: DecoratedType + ) -> bool: + if isinstance(first_type_param, Reference): + t = first_type_param.ref_to + elif isinstance(first_type_param, MoveReference): + t = first_type_param.moveref_to + else: + return False + + if not isinstance(t, Type): + return False + + last_seg = t.typename.segments[-1] + if not isinstance(last_seg, NameSpecifier): + return False + + if len(t.typename.segments) == 1: + return last_seg.name 
== cctx.cpp_name + else: + # This isn't quite right, but probably rarely happens? + param_name = _fmt_nameonly(t.typename) + return param_name == cctx.full_cpp_name def on_class_friend(self, state: AWClassBlockState, friend: FriendDecl) -> None: pass def on_class_end(self, state: AWClassBlockState) -> None: # post-process the class data - pass + cdata = state.user_data + ctx = cdata.ctx + class_data = cdata.data + + # If there isn't already a constructor, add a default constructor + # - was going to add a FunctionContext for it, but.. this is way easier + ctx.add_default_constructor = ( + not ctx.has_constructor + and not class_data.nodelete + and not class_data.force_no_default_constructor + ) + + has_trampoline = ( + ctx.is_polymorphic + and not state.class_decl.final + and not class_data.force_no_trampoline + ) + + # process methods and fields + if has_trampoline: + state.access = "protected" + for m in cdata.defer_protected_methods: + self._on_class_method(state, m) + + props = ctx.protected_properties + for f in cdata.defer_protected_fields: + self._on_class_field(state, f, props) + + state.access = "private" + for m in cdata.defer_private_virtual_methods: + self._on_class_method(state, m) + + self.hctx.classes_with_trampolines.append(ctx) + + tmpl = "" + if cdata.template_argument_list: + tmpl = f", {cdata.template_argument_list}" + + trampoline_cfg = f"rpygen::PyTrampolineCfg_{cdata.cls_cpp_identifier}<{cdata.template_argument_list}>" + tname = f"rpygen::PyTrampoline_{cdata.cls_cpp_identifier}" + tvar = f"{ctx.var_name}_Trampoline" + + if cdata.base_template_params: + tmpl_args = ", ".join(cdata.base_template_args) + tmpl_params = ", ".join(cdata.base_template_params) + else: + tmpl_args = "" + tmpl_params = "" + + ctx.trampoline = TrampolineData( + full_cpp_name=tname, + var=tvar, + inline_code=class_data.trampoline_inline_code, + tmpl_args=tmpl_args, + tmpl_params=tmpl_params, + methods_to_disable=cdata.methods_to_disable, + 
virtual_methods=cdata.virtual_methods, + protected_constructors=cdata.protected_constructors, + non_virtual_protected_methods=cdata.non_virtual_protected_methods, + ) + + elif class_data.trampoline_inline_code is not None: + raise ValueError( + f"{cdata.cls_key} has trampoline_inline_code specified, but there is no trampoline!" + ) + + # + # Function/method processing + # + + def _on_fn_or_method( + self, + fn: Function, + data: FunctionData, + fn_name: str, + scope_var: str, + internal: bool, + overload_tracker: OverloadTracker, + ) -> FunctionContext: + + # if cpp_code is specified, don't release the gil unless the user + # specifically asks for it + if data.no_release_gil is None: + release_gil = data.cpp_code is None + else: + release_gil = not data.no_release_gil + + all_params: typing.List[ParamContext] = [] + filtered_params: typing.List[ParamContext] = [] + keepalives = [] + + has_out_param = False + + # Use this if one of the parameter types don't quite match + param_override = data.param_override + fn_disable_none = data.disable_none + + # keep track of param name changes so we can automatically update + # documentation + param_remap: typing.Dict[str, str] = {} + + # + # Process parameters + # + + for i, p in enumerate(fn.parameters): + p_name = p.name + if not p_name: + p_name = f"param{i}" + + po = param_override.get(p_name, _default_param_data) + + pctx = self._on_fn_param( + p, + p_name, + fn_disable_none, + po, + param_remap, + ) + + all_params.append(pctx) + if not po.ignore: + filtered_params.append(pctx) + + if pctx.category == ParamCategory.OUT: + has_out_param = True + + return_value_policy = _rvp_map[data.return_value_policy] + + # Set up the function's name + if data.rename: + # user preference wins, of course + py_name = data.rename + elif isinstance(fn, Method) and fn.constructor: + py_name = "__init__" + else: + # Python exposed function name converted to camelcase + py_name = self._make_py_name( + fn_name, data, is_operator=fn.operator is 
not None + ) + if not py_name[:2].isupper(): + py_name = f"{py_name[0].lower()}{py_name[1:]}" + + if data.internal or internal: + py_name = f"_{py_name}" + + doc = self._process_doc(fn.doxygen, data, param_remap=param_remap) + + # Allow the user to override our auto-detected keepalives + if data.keepalive is not None: + keepalives = data.keepalive + + # Check for user errors + if not self.report_only: + if fn.template: + if data.template_impls is None and not data.cpp_code: + raise ValueError( + f"{fn_name}: must specify template impls for function template" + ) + else: + if data.template_impls is not None: + raise ValueError( + f"{fn_name}: cannot specify template_impls for non-template functions" + ) + + # + # fn_retval is needed for gensig, vcheck assertions + # - gensig is not computable here + # + fn_retval: typing.Optional[str] = None + if fn.return_type: + fn_retval = _fmt_type(fn.return_type) + self._add_type_caster(fn.return_type) + + fctx = FunctionContext( + cpp_name=fn_name, + doc=doc, + scope_var=scope_var, + # transforms + py_name=py_name, + cpp_return_type=fn_retval, + all_params=all_params, + filtered_params=filtered_params, + has_buffers=bool(data.buffers), + keepalives=keepalives, + return_value_policy=return_value_policy, + # info + # vararg=fn.vararg, + # user settings + ignore_pure=data.ignore_pure, + ignore_py=data.ignore_py, + cpp_code=data.cpp_code, + trampoline_cpp_code=data.trampoline_cpp_code, + ifdef=data.ifdef, + ifndef=data.ifndef, + release_gil=release_gil, + template_impls=data.template_impls, + virtual_xform=data.virtual_xform, + is_overloaded=overload_tracker, + _fn=fn, + ) + + # Generate a special lambda wrapper only when needed + if not data.cpp_code and (has_out_param or fctx.has_buffers): + self._on_fn_make_lambda(data, fctx) + + return fctx + + def _on_fn_param( + self, + p: Parameter, + p_name: str, + fn_disable_none: bool, + param_override: ParamData, + param_remap: typing.Dict[str, str], + ): + # Cool.. 
almost done, just need to figure out the param stuff + + # + # Overrides + # + + # + # Name stuff + # + + # + # Type stuff + # + + ptype, p_pointer, p_reference = _count_and_unwrap(p.type) + fundamental = isinstance(ptype, Type) and _is_fundamental( + ptype.typename.segments[-1] + ) + self._add_type_caster(ptype) + + # Ok, this is weird, why does cpp_type not have a const, then + # adds it later? I guess cppheaderparser legacy + # .. cpp_type isn't used anywhere + + cpp_type_no_const = _fmt_type(ptype) + cpp_type = cpp_type_no_const + + if p_pointer: + call_name = p_name + elif p_reference: + call_name = f"std::forward({p_name})" + else: + call_name = f"std::move({p_name})" + + # This is different because call_name might get special treatment later + virtual_call_name = call_name + cpp_retname = orig_pname = p_name + + # TODO: this is precarious + # - needs to override some things + force_out = False + default = None + disable_none = fn_disable_none + if param_override is not _default_param_data: + force_out = param_override.force_out + if param_override.name: + p_name = param_override.name + if param_override.x_type: + cpp_type = param_override.x_type + self._add_user_type_caster(cpp_type) + if param_override.default: + default = param_override.default + if param_override.disable_none is not None: + disable_none = param_override.disable_none + + py_pname = p_name + if iskeyword(py_pname): + py_pname = f"{py_pname}_" + + if orig_pname != py_pname: + param_remap[orig_pname] = py_pname + + # Autodetect disable_none if not explicitly specified + if disable_none is None: + disable_none = cpp_type.startswith("std::function") + + if disable_none: + py_arg = f'py::arg("{py_pname}").none(false)' + else: + py_arg = f'py::arg("{py_pname}")' + + # + # Default parameter + # + + # Do this after cpp_type is resolved but before it gets its const + if not default and p.default: + default = tokfmt(p.default.tokens) + + if default: + default = self._resolve_default(default, 
cpp_type, ptype) + if not param_override.disable_type_caster_default_cast: + default = self._add_default_arg_cast(p, default, cpp_type) + if default: + py_arg = f"{py_arg} = {default}" + + pcat = ParamCategory.IN + + if force_out or ( + (p_pointer or p_reference == 1) and not p_const and fundamental + ): + if p_pointer: + call_name = f"&{call_name}" + else: + call_name = p_name + + pcat = ParamCategory.OUT + elif p["array"]: + asz = param_override.array_size or p.get("array_size", 0) + if asz: + cpp_type = f"std::array<{cpp_type}, {asz}>" + call_name = f"{call_name}.data()" + if not default: + default = "{}" + else: + # it's a vector + pass + pcat = ParamCategory.OUT + + if p_const: + cpp_type = f"const {cpp_type}" + + x_type_full = cpp_type + x_type_full += "&" * p_reference + x_type_full += "*" * p_pointer + + return ParamContext( + arg_name=p_name, + cpp_type=cpp_type, + full_cpp_type=x_type_full, + py_arg=py_arg, + default=default, + # only used by genlambda + call_name=call_name, + # only used if virtual, duh + virtual_call_name=virtual_call_name, + cpp_retname=cpp_retname, + category=pcat, + ) + + def _on_fn_make_lambda(self, data: FunctionData, fctx: FunctionContext): + """ + When we need to transform the C++ function to make it more pythonic, we + autogenerate a lambda function as part of the wrapper. 
This is needed: + + * When an 'out' parameter is detected (a pointer receiving a value) + * When a buffer + size parameter exists (either in or out) + """ + + # Statements to insert before calling the function + lambda_pre: typing.List[str] = [] + + # If buffer overrides present, apply those to the parameters + if data.buffers: + self._apply_buffer_params(data, fctx, lambda_pre) + + in_params: typing.List[ParamContext] = [] + out_params: typing.List[ParamContext] = [] + ret_params: typing.List[_ReturnParamContext] = [] + tmp_params: typing.List[ParamContext] = [] + + # + # Sort the parameters + # + + for pctx in fctx.filtered_params: + if pctx.category == ParamCategory.OUT: + out_params.append(pctx) + tmp_params.append(pctx) + + elif pctx.category == ParamCategory.IN: + in_params.append(pctx) + + elif pctx.category == ParamCategory.TMP: + tmp_params.append(pctx) + + call_start = "" + lambda_ret = "" + + # Return values (original return value + any out parameters) + fn_retval = fctx.cpp_return_type + if fn_retval and fn_retval != "void": + call_start = "auto __ret =" + ret_params = [_ReturnParamContext(cpp_retname="__ret", cpp_type=fn_retval)] + ret_params.extend(out_params) + else: + ret_params = out_params[:] + + if len(ret_params) == 1 and ret_params[0].cpp_type != "void": + lambda_ret = f"return {ret_params[0].cpp_retname};" + elif len(ret_params) > 1: + t = ",".join([p.cpp_retname for p in ret_params]) + lambda_ret = f"return std::make_tuple({t});" + + # Temporary values to store out parameters in + if tmp_params: + for out in reversed(tmp_params): + odef = out.default + if not odef: + lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}") + elif odef.startswith("{"): + lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}{odef}") + else: + lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name} = {odef}") + + pre = _lambda_predent + f";\n{_lambda_predent}".join(lambda_pre) + ";" + + fctx.genlambda = GeneratedLambda( + pre=pre, + call_start=call_start, + 
ret=lambda_ret, + in_params=in_params, + out_params=out_params, + ) + + def _apply_buffer_params( + self, + data: FunctionData, + fctx: FunctionContext, + lambda_pre: typing.List[str], + ): + """ + Modifies the function parameters for buffer usage + """ + + # buffers: accepts a python object that supports the buffer protocol + # as input. If the buffer is an 'out' buffer, then it + # will request a writeable buffer. Data is written by the + # wrapped function to that buffer directly, and the length + # written (if the length is a pointer) will be returned + buffer_params: typing.Dict[str, BufferData] = {} + buflen_params: typing.Dict[str, BufferData] = {} + + for bufinfo in data.buffers: + if bufinfo.src in buffer_params: + raise ValueError( + f"buffer src({bufinfo.src}) is in multiple buffer specifications" + ) + elif bufinfo.len in buflen_params: + raise ValueError( + f"buffer len({bufinfo.len}) is in multiple buffer specifications" + ) + buffer_params[bufinfo.src] = bufinfo + buflen_params[bufinfo.len] = bufinfo + + dups = set(buffer_params.keys()).intersection(buflen_params.keys()) + if dups: + names = "', '".join(dups) + raise ValueError(f"These params are both buffer src and len: '{names}'") + + for pctx in fctx.all_params: + p_name = pctx.arg_name + if p_name in buffer_params: + bufinfo = buffer_params.pop(p_name) + bname = f"__{bufinfo.src}" + + pctx.call_name = f"({pctx.cpp_type}*){bname}.ptr" + pctx.cpp_type = "const py::buffer" + pctx.full_cpp_type = "const py::buffer&" + + # this doesn't seem to be true for bytearrays, which is silly + # x_lambda_pre.append( + # f'if (PyBuffer_IsContiguous((Py_buffer*){p_name}.ptr(), \'C\') == 0) throw py::value_error("{p_name}: buffer must be contiguous")' + # ) + + # TODO: check for dimensions, strides, other dangerous things + + # bufinfo was validated and converted before it got here + pctx.category = ParamCategory.IN + if bufinfo.type is BufferType.IN: + lambda_pre += [f"auto {bname} = {p_name}.request(false)"] + 
else: + lambda_pre += [f"auto {bname} = {p_name}.request(true)"] + + lambda_pre += [f"{bufinfo.len} = {bname}.size * {bname}.itemsize"] + + if bufinfo.minsz: + lambda_pre.append( + f'if ({bufinfo.len} < {bufinfo.minsz}) throw py::value_error("{p_name}: minimum buffer size is {bufinfo.minsz}")' + ) + + elif p_name in buflen_params: + buflen = buflen_params.pop(p_name) + + # If the length is a pointer, assume that the function will accept + # an incoming length, and set the outgoing length + if pctx.full_cpp_type.endswith("*"): + pctx.call_name = f"&{buflen.len}" + pctx.category = ParamCategory.OUT + else: + # if it's not a pointer, then the called function + # can't communicate through it, so ignore the parameter + pctx.call_name = buflen.len + pctx.category = ParamCategory.TMP + + if buffer_params: + names = "', '".join(buffer_params.keys()) + raise ValueError(f"incorrect buffer param names '{names}'") + + if buflen_params: + names = "', '".join(buflen_params.keys()) + raise ValueError(f"incorrect buffer length names '{names}'") # # Utility methods # - def _add_type_caster(self, typename: str): - # defer until the end since there's lots of duplication - self.types.add(typename) + def _get_fn_name(self, fn: Function) -> typing.Optional[str]: + s = fn.name.segments[-1] + if isinstance(s, NameSpecifier): + return s.name + + # name is too complicated (can this happen? should be a warning?) 
+ assert False + return None def _get_module_var(self, data: HasSubpackage) -> str: if data.subpackage: @@ -700,11 +1680,150 @@ def _quote_doc(self, doc: typing.Optional[str]) -> Documentation: return doc_quoted + def _extract_typealias( + self, + in_ta: typing.List[str], + out_ta: typing.List[str], + ta_names: typing.Set[str], + ): + for typealias in in_ta: + if typealias.startswith("template"): + out_ta.append(typealias) + else: + teq = typealias.find("=") + if teq != -1: + ta_name = typealias[:teq].strip() + out_ta.append(f"using {typealias}") + else: + ta_name = typealias.split("::")[-1] + out_ta.append(f"using {ta_name} = {typealias}") + ta_names.add(ta_name) + + def _resolve_default( + self, + name: str, + cpp_type: str, + ptype: typing.Union[Array, FunctionType, Type], + ) -> str: + + if name.isnumeric() or name in ("NULL", "nullptr"): + pass + elif name[0] == "{" and name[-1] == "}": + if isinstance(ptype, Array): + return name + return f"{cpp_type}{name}" + + # if there's a parent, look there + # -> this seems rather expensive for little reward, how often do we need + # this? 
Also, doesn't have any test coverage yet, so let's not do it + # for now + # + # parent = fn["parent"] + # if parent: + # for prop in parent["properties"]["public"]: + # if prop["name"] == name: + # name = f"{parent['namespace']}::{parent['name']}::{name}" + + return name + + def _add_default_arg_cast( + self, name: str, ptype: typing.Union[Array, FunctionType, Type] + ) -> str: + # Adds an explicit cast to a default arg for certain types that have + # a type caster with an explicit default + while isinstance(ptype, Array): + ptype = ptype.array_of + if isinstance(ptype, Type): + typename = _fmt_nameonly(ptype.typename) + if typename: + ccfg = self.casters.get(typename) + if ccfg and ccfg.get("darg"): + found_typename = ccfg["typename"] + name = f"({found_typename}){name}" + + return name + + # + # type caster utilities + # + + def _add_type_caster(self, dt: DecoratedType): + # pick apart the type and add each to the list of types + # - it would be nice if we could just add this to a set + # and process it later, but that would probably be just + # as much work? 
+ while True: + if isinstance(dt, Type): + self._add_type_caster_pqname(dt.typename) + return + + elif isinstance(dt, FunctionType): + self._add_type_caster(dt.return_type) + for p in dt.parameters: + self._add_type_caster(p.type) + return + + elif isinstance(dt, Pointer): + dt = dt.ptr_to + elif isinstance(dt, Reference): + dt = dt.ref_to + elif isinstance(dt, MoveReference): + dt = dt.moveref_to + elif isinstance(dt, Array): + dt = dt.array_of + else: + assert False + + def _add_type_caster_pqname(self, typename: PQName): + parts = [] + for p in typename.segments: + if not isinstance(p, NameSpecifier): + return + parts.append(p.name) + if p.specialization: + for a in p.specialization.args: + if isinstance(a.arg, (DecoratedType, FunctionType)): + self._add_type_caster(a.arg) + + self.types.add("::".join(parts)) + + def _add_user_type_caster(self, typename: str): + # defer until the end since there's lots of duplication + self.user_types.add(typename) + + def _process_user_type_casters(self): + # processes each user type caster and adds it to the processed list + types = self.types + for typename in self.user_types: + tmpl_idx = typename.find("<") + if tmpl_idx == -1: + types.add(typename) + else: + types.add(typename[:tmpl_idx]) + types.update( + _type_caster_seps.split(typename[tmpl_idx:].replace(" ", "")) + ) + + def _set_type_caster_includes(self) -> typing.List[str]: + # process user casters + self._process_user_type_casters() + casters = self.casters + + # identify any associated headers + includes = set() + for typename in self.types: + ccfg = casters.get(typename) + if ccfg: + includes.add(ccfg["hdr"]) + + self.hctx.type_caster_includes = sorted(includes) + def parse_header( header_path: pathlib.Path, user_cfg: AutowrapConfigYaml, parser_options: ParserOptions, + casters: typing.Dict[str, typing.Dict[str, typing.Any]], ) -> HeaderContext: # defines, include_paths need to be set the parent @@ -726,17 +1845,46 @@ def parse_header( 
using_signature=_using_signature, ) - # Why not just use the simple parser? - # . seems more performant to not use it - # . is there a way I could use it but not build the data structure - - visitor = AutowrapVisitor(hctx) + # Parse the header using a custom visitor + visitor = AutowrapVisitor(hctx, gendata, casters, report_only) parser = CxxParser(str(header_path), content, visitor, parser_options) parser.parse() - # post-process per-header things - # - user typealias - # - type caster includes + # + # Per-header data + # + + for i, (k, tmpl_data) in enumerate(user_cfg.templates.items()): + qualname = tmpl_data.qualname + if "::" not in qualname: + qualname = f"::{qualname}" + qualname = qualname.translate(_qualname_trans) + + doc_add = tmpl_data.doc_append + if doc_add: + doc_add = f"\n{doc_add}" + + # TODO: this should be a list, not a dict + hctx.template_instances[str(i)] = TemplateInstanceContext( + scope_var=visitor._get_module_var(tmpl_data), + var_name=f"tmplCls{i}", + py_name=k, + full_cpp_name_identifier=qualname, + binder_typename=f"bind_{qualname}_{i}", + params=tmpl_data.params, + header_name=f"{qualname}.hpp", + doc_set=visitor._quote_doc(tmpl_data.doc), + doc_add=visitor._quote_doc(doc_add), + ) + + for param in tmpl_data.params: + visitor._add_user_type_caster(param) + + # User typealias additions + visitor._extract_typealias(user_cfg.typealias, hctx.user_typealias, set()) + + # Type caster + visitor._set_type_caster_includes() # missing reporter @@ -753,4 +1901,7 @@ def parse_header( # report only # casters? hm. 
+ + # - type caster includes (add dummy printer here) + pass diff --git a/robotpy_build/autowrap/hooks.py b/robotpy_build/autowrap/hooks.py index 7e36c13f..ce7d20bc 100644 --- a/robotpy_build/autowrap/hooks.py +++ b/robotpy_build/autowrap/hooks.py @@ -112,7 +112,7 @@ class Hooks: def __init__( self, data: AutowrapConfigYaml, - casters: typing.Dict[str, typing.Dict[str, typing.Any]], + report_only: bool, hname: str, ): @@ -121,23 +121,23 @@ def __init__( self.casters = casters self.report_only = report_only - self.types: typing.Set[str] = set() + # self.types: typing.Set[str] = set() - self.hctx = HeaderContext( - hname=hname, - extra_includes=data.extra_includes, - extra_includes_first=data.extra_includes_first, - inline_code=data.inline_code, - trampoline_signature=trampoline_signature, - using_signature=_using_signature, - ) + # self.hctx = HeaderContext( + # hname=hname, + # extra_includes=data.extra_includes, + # extra_includes_first=data.extra_includes_first, + # inline_code=data.inline_code, + # trampoline_signature=trampoline_signature, + # using_signature=_using_signature, + # ) def report_missing(self, name: str, reporter: MissingReporter): self.gendata.report_missing(name, reporter) - def _add_type_caster(self, typename: str): - # defer until the end since there's lots of duplication - self.types.add(typename) + # def _add_type_caster(self, typename: str): + # # defer until the end since there's lots of duplication + # self.types.add(typename) # def _get_module_var(self, data: HasSubpackage) -> str: # if data.subpackage: @@ -147,55 +147,55 @@ def _add_type_caster(self, typename: str): # return "m" - def _get_type_caster_cfgs(self, typename: str): - tmpl_idx = typename.find("<") - if tmpl_idx == -1: - typenames = [typename] - else: - typenames = [typename[:tmpl_idx]] + _type_caster_seps.split( - typename[tmpl_idx:].replace(" ", "") - ) - for typename in typenames: - if typename: - ccfg = self.casters.get(typename) - if ccfg: - yield ccfg - - def 
_get_type_caster_includes(self): - includes = set() - for typename in self.types: - for ccfg in self._get_type_caster_cfgs(typename): - includes.add(ccfg["hdr"]) - return sorted(includes) - - def _make_py_name( - self, - name: str, - data: HasNameData, - strip_prefixes: typing.Optional[typing.List[str]] = None, - is_operator=False, - ): - if data.rename: - return data.rename - - if strip_prefixes is None: - strip_prefixes = self.rawdata.strip_prefixes - - if strip_prefixes: - for pfx in strip_prefixes: - if name.startswith(pfx): - n = name[len(pfx) :] - if n.isidentifier(): - name = n - break - - if iskeyword(name): - return f"{name}_" - if not name.isidentifier() and not is_operator: - if not self.report_only: - raise ValueError(f"name {name!r} is not a valid identifier") - - return name + # def _get_type_caster_cfgs(self, typename: str): + # tmpl_idx = typename.find("<") + # if tmpl_idx == -1: + # typenames = [typename] + # else: + # typenames = [typename[:tmpl_idx]] + _type_caster_seps.split( + # typename[tmpl_idx:].replace(" ", "") + # ) + # for typename in typenames: + # if typename: + # ccfg = self.casters.get(typename) + # if ccfg: + # yield ccfg + + # def _get_type_caster_includes(self): + # includes = set() + # for typename in self.types: + # for ccfg in self._get_type_caster_cfgs(typename): + # includes.add(ccfg["hdr"]) + # return sorted(includes) + + # def _make_py_name( + # self, + # name: str, + # data: HasNameData, + # strip_prefixes: typing.Optional[typing.List[str]] = None, + # is_operator=False, + # ): + # if data.rename: + # return data.rename + + # if strip_prefixes is None: + # strip_prefixes = self.rawdata.strip_prefixes + + # if strip_prefixes: + # for pfx in strip_prefixes: + # if name.startswith(pfx): + # n = name[len(pfx) :] + # if n.isidentifier(): + # name = n + # break + + # if iskeyword(name): + # return f"{name}_" + # if not name.isidentifier() and not is_operator: + # if not self.report_only: + # raise ValueError(f"name {name!r} is 
not a valid identifier") + + # return name # def _process_doc( # self, @@ -237,145 +237,145 @@ def _make_py_name( # return doc_quoted - def _resolve_default(self, fn, p, name, cpp_type) -> str: - if isinstance(name, (int, float)): - return str(name) - if name in ("NULL", "nullptr"): - return name - - if name and name[0] == "{" and name[-1] == "}": - if p["array"]: - return name - return f"{cpp_type}{name}" - - # if there's a parent, look there - parent = fn["parent"] - if parent: - for prop in parent["properties"]["public"]: - if prop["name"] == name: - name = f"{parent['namespace']}::{parent['name']}::{name}" - return name - - def _add_default_arg_cast(self, p, name, cpp_type): - found_typename = None - for ccfg in self._get_type_caster_cfgs(cpp_type): - if ccfg.get("darg"): - if found_typename and found_typename != ccfg["typename"]: - raise HookError( - f"multiple type casters found for {p['name']} ({cpp_type}), use disable_type_caster_default_cast" - ) - found_typename = ccfg["typename"] - name = f"({found_typename}){name}" - - return name - - def _get_function_signature(self, fn): - param_sig = ", ".join( - p.get("enum", p["raw_type"]) + "&" * p["reference"] + "*" * p["pointer"] - for p in fn["parameters"] - ) - param_sig = param_sig.replace(" >", ">") - if fn["const"]: - if param_sig: - param_sig += " [const]" - else: - param_sig = "[const]" - - return param_sig - - def _process_base_param(self, decl_param): - params = decl_param.get("params") - if params: - # recurse - params = [self._process_base_param(param) for param in params] - return f"{decl_param['param']}<{', '.join(params)}>" - else: - return decl_param["param"] - - def _make_base_params( - self, base_decl_params, pybase_params: typing.Set[str] - ) -> str: - base_params = [ - self._process_base_param(decl_param) for decl_param in base_decl_params - ] - - for decl_param in base_params: - pybase_params.add(decl_param) - - return ", ".join(base_params) - - def _extract_typealias( - self, - in_ta: 
typing.List[str], - out_ta: typing.List[str], - ta_names: typing.Set[str], - ): - for typealias in in_ta: - if typealias.startswith("template"): - out_ta.append(typealias) - else: - teq = typealias.find("=") - if teq != -1: - ta_name = typealias[:teq].strip() - out_ta.append(f"using {typealias}") - else: - ta_name = typealias.split("::")[-1] - out_ta.append(f"using {ta_name} = {typealias}") - ta_names.add(ta_name) + # def _resolve_default(self, fn, p, name, cpp_type) -> str: + # if isinstance(name, (int, float)): + # return str(name) + # if name in ("NULL", "nullptr"): + # return name + + # if name and name[0] == "{" and name[-1] == "}": + # if p["array"]: + # return name + # return f"{cpp_type}{name}" + + # # if there's a parent, look there + # parent = fn["parent"] + # if parent: + # for prop in parent["properties"]["public"]: + # if prop["name"] == name: + # name = f"{parent['namespace']}::{parent['name']}::{name}" + # return name + + # def _add_default_arg_cast(self, p, name, cpp_type): + # found_typename = None + # for ccfg in self._get_type_caster_cfgs(cpp_type): + # if ccfg.get("darg"): + # if found_typename and found_typename != ccfg["typename"]: + # raise HookError( + # f"multiple type casters found for {p['name']} ({cpp_type}), use disable_type_caster_default_cast" + # ) + # found_typename = ccfg["typename"] + # name = f"({found_typename}){name}" + + # return name + + # def _get_function_signature(self, fn): + # param_sig = ", ".join( + # p.get("enum", p["raw_type"]) + "&" * p["reference"] + "*" * p["pointer"] + # for p in fn["parameters"] + # ) + # param_sig = param_sig.replace(" >", ">") + # if fn["const"]: + # if param_sig: + # param_sig += " [const]" + # else: + # param_sig = "[const]" + + # return param_sig + + # def _process_base_param(self, decl_param): + # params = decl_param.get("params") + # if params: + # # recurse + # params = [self._process_base_param(param) for param in params] + # return f"{decl_param['param']}<{', '.join(params)}>" + # 
else: + # return decl_param["param"] + + # def _make_base_params( + # self, base_decl_params, pybase_params: typing.Set[str] + # ) -> str: + # base_params = [ + # self._process_base_param(decl_param) for decl_param in base_decl_params + # ] + + # for decl_param in base_params: + # pybase_params.add(decl_param) + + # return ", ".join(base_params) + + # def _extract_typealias( + # self, + # in_ta: typing.List[str], + # out_ta: typing.List[str], + # ta_names: typing.Set[str], + # ): + # for typealias in in_ta: + # if typealias.startswith("template"): + # out_ta.append(typealias) + # else: + # teq = typealias.find("=") + # if teq != -1: + # ta_name = typealias[:teq].strip() + # out_ta.append(f"using {typealias}") + # else: + # ta_name = typealias.split("::")[-1] + # out_ta.append(f"using {ta_name} = {typealias}") + # ta_names.add(ta_name) def header_hook(self, header, data): """Called for each header""" - self.hctx.rel_fname = header.rel_fname - - for using in header.using.values(): - if using["using_type"] == "declaration": - self.hctx.using_declarations.append(using["raw_type"]) - - for i, en in enumerate(header.enums): - enum_data = self.gendata.get_enum_data(en.get("name")) - - if not enum_data.ignore: - scope_var = self._get_module_var(enum_data) - var_name = f"enum{i}" - self.hctx.enums.append( - self._enum_hook(en["namespace"], scope_var, var_name, en, enum_data) - ) - - for v in header.variables: - # TODO: in theory this is used to wrap global variables, but it's - # currently totally ignored - self.gendata.get_prop_data(v["name"]) - self._add_type_caster(v["raw_type"]) - - for _, u in header.using.items(): - self._add_type_caster(u["raw_type"]) - - for i, (k, tmpl_data) in enumerate(data["data"].templates.items()): - qualname = tmpl_data.qualname - if "::" not in qualname: - qualname = f"::{qualname}" - qualname = qualname.translate(self._qualname_trans) - - doc_add = tmpl_data.doc_append - if doc_add: - doc_add = f"\n{doc_add}" - - # TODO: this should be a 
list, not a dict - self.hctx.template_instances[str(i)] = TemplateInstanceContext( - scope_var=self._get_module_var(tmpl_data), - var_name=f"tmplCls{i}", - py_name=k, - full_cpp_name_identifier=qualname, - binder_typename=f"bind_{qualname}_{i}", - params=tmpl_data.params, - header_name=f"{qualname}.hpp", - doc_set=self._quote_doc(tmpl_data.doc), - doc_add=self._quote_doc(doc_add), - ) - - for param in tmpl_data.params: - self._add_type_caster(param) + # self.hctx.rel_fname = header.rel_fname + + # for using in header.using.values(): + # if using["using_type"] == "declaration": + # self.hctx.using_declarations.append(using["raw_type"]) + + # for i, en in enumerate(header.enums): + # enum_data = self.gendata.get_enum_data(en.get("name")) + + # if not enum_data.ignore: + # scope_var = self._get_module_var(enum_data) + # var_name = f"enum{i}" + # self.hctx.enums.append( + # self._enum_hook(en["namespace"], scope_var, var_name, en, enum_data) + # ) + + # for v in header.variables: + # # TODO: in theory this is used to wrap global variables, but it's + # # currently totally ignored + # self.gendata.get_prop_data(v["name"]) + # self._add_type_caster(v["raw_type"]) + + # for _, u in header.using.items(): + # self._add_type_caster(u["raw_type"]) + + # for i, (k, tmpl_data) in enumerate(data["data"].templates.items()): + # qualname = tmpl_data.qualname + # if "::" not in qualname: + # qualname = f"::{qualname}" + # qualname = qualname.translate(self._qualname_trans) + + # doc_add = tmpl_data.doc_append + # if doc_add: + # doc_add = f"\n{doc_add}" + + # # TODO: this should be a list, not a dict + # self.hctx.template_instances[str(i)] = TemplateInstanceContext( + # scope_var=self._get_module_var(tmpl_data), + # var_name=f"tmplCls{i}", + # py_name=k, + # full_cpp_name_identifier=qualname, + # binder_typename=f"bind_{qualname}_{i}", + # params=tmpl_data.params, + # header_name=f"{qualname}.hpp", + # doc_set=self._quote_doc(tmpl_data.doc), + # doc_add=self._quote_doc(doc_add), + 
# ) + + # for param in tmpl_data.params: + # self._add_type_caster(param) self.hctx.type_caster_includes = self._get_type_caster_includes() @@ -389,430 +389,430 @@ def header_hook(self, header, data): data["skip_generation"] = skip_generation data.update(self.hctx.__dict__) - def _function_hook( - self, - fn, - data: FunctionData, - scope_var: str, - internal: bool, - overload_tracker: OverloadTracker, - ) -> FunctionContext: - """shared with methods/functions""" - - # if cpp_code is specified, don't release the gil unless the user - # specifically asks for it - if data.no_release_gil is None: - if data.cpp_code: - data.no_release_gil = True - - x_all_params: typing.List[ParamContext] = [] - x_in_params: typing.List[ParamContext] = [] - out_params: typing.List[ParamContext] = [] - x_filtered_params: typing.List[ParamContext] = [] - x_rets: typing.List[_ReturnParamContext] = [] - x_temps: typing.List[ParamContext] = [] - keepalives = [] - - param_remap = {} - - has_buffers = len(data.buffers) > 0 - need_lambda = False - genlambda: typing.Optional[GeneratedLambda] = None - lambda_pre: typing.List[str] = [] - - # Use this if one of the parameter types don't quite match - param_override = data.param_override - - # buffers: accepts a python object that supports the buffer protocol - # as input. If the buffer is an 'out' buffer, then it - # will request a writeable buffer. 
Data is written by the - # wrapped function to that buffer directly, and the length - # written (if the length is a pointer) will be returned - buffer_params: typing.Dict[str, BufferData] = {} - buflen_params: typing.Dict[str, BufferData] = {} - if data.buffers: - for bufinfo in data.buffers: - if bufinfo.src == bufinfo.len: - raise ValueError( - f"buffer src({bufinfo.src}) and len({bufinfo.len}) cannot be the same" - ) - buffer_params[bufinfo.src] = bufinfo - buflen_params[bufinfo.len] = bufinfo - - is_constructor = fn.get("constructor") - fn_disable_none = data.disable_none - - # Process parameters - - for i, p in enumerate(fn["parameters"]): - p_const = bool(p["constant"]) - p_reference = p["reference"] - p_pointer = p["pointer"] - - # automatically retain references passed to constructors - if is_constructor and p_reference == 1: - keepalives.append((1, i + 2)) - - if p["raw_type"] in _int32_types: - fundamental = True - else: - fundamental = p["fundamental"] - - cpp_type_no_const = p.get("enum", p["raw_type"]) - cpp_type = cpp_type_no_const - - p_name = p["name"] - orig_pname = p_name - if p_name == "": - p_name = f"param{i}" - - if p_pointer: - call_name = p_name - elif p_reference: - call_name = f"std::forward({p['name']})" - else: - call_name = f"std::move({p['name']})" - - # This is different because call_name might get special treatment later - virtual_call_name = call_name - cpp_retname = orig_pname - - # TODO: this is precarious - # - needs to override some things - force_out = False - default = None - disable_none = fn_disable_none - po = param_override.get(p_name) - if po: - force_out = po.force_out - if po.name: - p_name = po.name - if po.x_type: - cpp_type = po.x_type - if po.default: - default = po.default - if po.disable_none is not None: - disable_none = po.disable_none - else: - po = _default_param_data - - py_pname = p_name - if iskeyword(py_pname): - py_pname = f"{py_pname}_" - - if orig_pname != py_pname: - param_remap[orig_pname] = py_pname 
- - # Autodetect disable_none if not explicitly specified - if disable_none is None: - disable_none = cpp_type.startswith("std::function") - - if disable_none: - py_arg = f'py::arg("{py_pname}").none(false)' - else: - py_arg = f'py::arg("{py_pname}")' - - default = default or p.get("default", None) - if default: - default = self._resolve_default(fn, p, default, cpp_type) - if not po.disable_type_caster_default_cast: - default = self._add_default_arg_cast(p, default, cpp_type) - if default: - py_arg = f"{py_arg} = {default}" - - ptype = "in" - - buflen = buflen_params.pop(p_name, None) - - if p_name in buffer_params: - bufinfo = buffer_params.pop(p_name) - need_lambda = True - bname = f"__{bufinfo.src}" - p_const = True - p_reference = 1 - p_pointer = 0 - - call_name = f"({cpp_type}*){bname}.ptr" - cpp_type = "py::buffer" - - # this doesn't seem to be true for bytearrays, which is silly - # x_lambda_pre.append( - # f'if (PyBuffer_IsContiguous((Py_buffer*){p_name}.ptr(), \'C\') == 0) throw py::value_error("{p_name}: buffer must be contiguous")' - # ) + # def _function_hook( + # self, + # fn, + # data: FunctionData, + # scope_var: str, + # internal: bool, + # overload_tracker: OverloadTracker, + # ) -> FunctionContext: + # """shared with methods/functions""" + + # # if cpp_code is specified, don't release the gil unless the user + # # specifically asks for it + # if data.no_release_gil is None: + # if data.cpp_code: + # data.no_release_gil = True + + # x_all_params: typing.List[ParamContext] = [] + # x_in_params: typing.List[ParamContext] = [] + # out_params: typing.List[ParamContext] = [] + # x_filtered_params: typing.List[ParamContext] = [] + # x_rets: typing.List[_ReturnParamContext] = [] + # x_temps: typing.List[ParamContext] = [] + # keepalives = [] + + # param_remap = {} + + # has_buffers = len(data.buffers) > 0 + # need_lambda = False + # genlambda: typing.Optional[GeneratedLambda] = None + # lambda_pre: typing.List[str] = [] + + # # Use this if one of the 
parameter types don't quite match + # param_override = data.param_override + + # # buffers: accepts a python object that supports the buffer protocol + # # as input. If the buffer is an 'out' buffer, then it + # # will request a writeable buffer. Data is written by the + # # wrapped function to that buffer directly, and the length + # # written (if the length is a pointer) will be returned + # buffer_params: typing.Dict[str, BufferData] = {} + # buflen_params: typing.Dict[str, BufferData] = {} + # if data.buffers: + # for bufinfo in data.buffers: + # if bufinfo.src == bufinfo.len: + # raise ValueError( + # f"buffer src({bufinfo.src}) and len({bufinfo.len}) cannot be the same" + # ) + # buffer_params[bufinfo.src] = bufinfo + # buflen_params[bufinfo.len] = bufinfo + + # is_constructor = fn.get("constructor") + # fn_disable_none = data.disable_none + + # # Process parameters + + # for i, p in enumerate(fn["parameters"]): + # p_const = bool(p["constant"]) + # p_reference = p["reference"] + # p_pointer = p["pointer"] + + # # automatically retain references passed to constructors + # if is_constructor and p_reference == 1: + # keepalives.append((1, i + 2)) + + # if p["raw_type"] in _int32_types: + # fundamental = True + # else: + # fundamental = p["fundamental"] - # TODO: check for dimensions, strides, other dangerous things - - # bufinfo was validated and converted before it got here - if bufinfo.type is BufferType.IN: - ptype = "in" - lambda_pre += [f"auto {bname} = {p_name}.request(false)"] - else: - ptype = "in" - lambda_pre += [f"auto {bname} = {p_name}.request(true)"] - - lambda_pre += [f"{bufinfo.len} = {bname}.size * {bname}.itemsize"] - - if bufinfo.minsz: - lambda_pre.append( - f'if ({bufinfo.len} < {bufinfo.minsz}) throw py::value_error("{p_name}: minimum buffer size is {bufinfo.minsz}")' - ) - - elif buflen: - if p_pointer: - call_name = f"&{buflen.len}" - ptype = "out" - else: - # if it's not a pointer, then the called function - # can't communicate through 
it, so ignore the parameter - need_lambda = True - call_name = buflen.len - ptype = "tmp" - - elif force_out or ( - (p_pointer or p_reference == 1) and not p_const and fundamental - ): - if p_pointer: - call_name = f"&{call_name}" - else: - call_name = p_name - - ptype = "out" - elif p["array"]: - asz = po.array_size or p.get("array_size", 0) - if asz: - cpp_type = f"std::array<{cpp_type}, {asz}>" - call_name = f"{call_name}.data()" - if not default: - default = "{}" - else: - # it's a vector - pass - ptype = "out" - - self._add_type_caster(cpp_type) - - if p_const: - cpp_type = f"const {cpp_type}" - - x_type_full = cpp_type - x_type_full += "&" * p_reference - x_type_full += "*" * p_pointer - - x_decl = f"{x_type_full} {p_name}" - - pctx = ParamContext( - arg_name=p_name, - full_cpp_type=x_type_full, - cpp_type=cpp_type, - cpp_type_no_const=cpp_type_no_const, - default=default, - decl=x_decl, - py_arg=py_arg, - call_name=call_name, - virtual_call_name=virtual_call_name, - cpp_retname=cpp_retname, - const=p_const, - volatile=bool(p.get("volatile", 0)), - array=p.get("array"), - refs=p_reference, - pointers=p_pointer, - ) - - x_all_params.append(pctx) - if not po.ignore: - x_filtered_params.append(pctx) - if ptype == "out": - need_lambda = True - out_params.append(pctx) - x_temps.append(pctx) - - elif ptype == "in": - x_in_params.append(pctx) - - elif ptype == "tmp": - x_temps.append(pctx) - - if buffer_params: - raise ValueError( - "incorrect buffer param names '%s'" - % ("', '".join(buffer_params.keys())) - ) - - x_return_value_policy = _rvp_map[data.return_value_policy] - - # Set up the function's name - if data.rename: - # user preference wins, of course - py_name = data.rename - elif fn["constructor"]: - py_name = "__init__" - else: - # Python exposed function name converted to camelcase - py_name = self._make_py_name( - fn["name"], data, is_operator=fn.get("operator", False) - ) - if not py_name[:2].isupper(): - py_name = f"{py_name[0].lower()}{py_name[1:]}" - 
- if data.internal or internal: - py_name = f"_{py_name}" - - doc_quoted = self._process_doc(fn, data, param_remap=param_remap) - - # Allow the user to override our auto-detected keepalives - if data.keepalive is not None: - keepalives = data.keepalive - - ref_qualifiers = fn.get("ref_qualifiers", "") - - if not self.report_only: - if fn["template"]: - if data.template_impls is None and not data.cpp_code: - raise ValueError( - f"{fn['name']}: must specify template impls for function template" - ) - else: - if data.template_impls is not None: - raise ValueError( - f"{fn['name']}: cannot specify template_impls for non-template functions" - ) - - if data.ignore_pure and not fn["pure_virtual"]: - raise ValueError( - f"{fn['name']}: cannot specify ignore_pure for function that isn't pure" - ) - - if data.trampoline_cpp_code and (not fn["override"] and not fn["virtual"]): - raise ValueError( - f"{fn['name']}: cannot specify trampoline_cpp_code for a non-virtual method" - ) - - if data.virtual_xform and (not fn["override"] and not fn["virtual"]): - raise ValueError( - f"{fn['name']}: cannot specify virtual_xform for a non-virtual method" - ) - - if ref_qualifiers == "&&": - # pybind11 doesn't support this, user must fix it - if not data.ignore_py and not data.cpp_code: - raise ValueError( - f"{fn['name']}: has && ref-qualifier which cannot be directly bound by pybind11, must specify cpp_code or ignore_py" - ) - - # - # fn_retval is needed for gensig, vcheck assertions - # - gensig is not computable here - # - fn_retval: typing.Optional[str] = None - if not is_constructor: - # rtnType and returns are inconsistent in CppHeaderParser's output - # because sometimes it resolves them, so just do our best for now - - self._add_type_caster(fn["returns"]) - - retval = [] - if fn.get("returns_const"): - retval.append("const") - if "returns_enum" in fn: - retval.append(fn["rtnType"]) - else: - retval.append(fn["returns"]) - if fn["returns_pointer"]: - retval.append("*") - if 
fn["returns_reference"]: - retval.append("&") - fn_retval = " ".join(retval) - - # Lambda generation: - # - in_params (needed for py::arg generation) - # - x_lambda stuff - if need_lambda: - call_start = "" - lambda_ret = "" - - # Return all out parameters - x_rets.extend(out_params) - - if fn_retval != "void": - call_start = "auto __ret =" - x_rets.insert( - 0, _ReturnParamContext(cpp_retname="__ret", cpp_type=fn_retval) - ) - - if len(x_rets) == 1 and x_rets[0].cpp_type != "void": - lambda_ret = f"return {x_rets[0].cpp_retname};" - elif len(x_rets) > 1: - lambda_ret = "return std::make_tuple(%s);" % ",".join( - [p.cpp_retname for p in x_rets] - ) - - # Temporary values to store out parameters in - if x_temps: - for out in reversed(x_temps): - odef = out.default - if not odef: - lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}") - elif odef.startswith("{"): - lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}{odef}") - else: - lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name} = {odef}") - - pre = _lambda_predent + f";\n{_lambda_predent}".join(lambda_pre) + ";" - - genlambda = GeneratedLambda( - pre=pre, - call_start=call_start, - ret=lambda_ret, - in_params=x_in_params, - out_params=out_params, - ) - - return FunctionContext( - cpp_name=fn["name"], - doc=doc_quoted, - scope_var=scope_var, - # transforms - py_name=py_name, - cpp_return_type=fn_retval, - all_params=x_all_params, - filtered_params=x_filtered_params, - has_buffers=has_buffers, - keepalives=keepalives, - return_value_policy=x_return_value_policy, - # lambda generation - genlambda=genlambda, - # info - const=fn["const"], - vararg=fn["vararg"], - ref_qualifiers=ref_qualifiers, - is_constructor=is_constructor, - # user settings - ignore_pure=data.ignore_pure, - ignore_py=data.ignore_py, - cpp_code=data.cpp_code, - trampoline_cpp_code=data.trampoline_cpp_code, - ifdef=data.ifdef, - ifndef=data.ifndef, - release_gil=not data.no_release_gil, - template_impls=data.template_impls, - 
virtual_xform=data.virtual_xform, - is_overloaded=overload_tracker, - ) - - def function_hook(self, fn, h2w_data): - # operators that aren't class members aren't rendered - if fn.get("operator"): - return - - signature = self._get_function_signature(fn) - data, overload_tracker = self.gendata.get_function_data(fn["name"], signature) - if data.ignore: - return - - scope_var = self._get_module_var(data) - fctx = self._function_hook(fn, data, scope_var, False, overload_tracker) - fctx.namespace = fn["namespace"] - self.hctx.functions.append(fctx) + # cpp_type_no_const = p.get("enum", p["raw_type"]) + # cpp_type = cpp_type_no_const + + # p_name = p["name"] + # orig_pname = p_name + # if p_name == "": + # p_name = f"param{i}" + + # if p_pointer: + # call_name = p_name + # elif p_reference: + # call_name = f"std::forward({p['name']})" + # else: + # call_name = f"std::move({p['name']})" + + # # This is different because call_name might get special treatment later + # virtual_call_name = call_name + # cpp_retname = orig_pname + + # # TODO: this is precarious + # # - needs to override some things + # force_out = False + # default = None + # disable_none = fn_disable_none + # po = param_override.get(p_name) + # if po: + # force_out = po.force_out + # if po.name: + # p_name = po.name + # if po.x_type: + # cpp_type = po.x_type + # if po.default: + # default = po.default + # if po.disable_none is not None: + # disable_none = po.disable_none + # else: + # po = _default_param_data + + # py_pname = p_name + # if iskeyword(py_pname): + # py_pname = f"{py_pname}_" + + # if orig_pname != py_pname: + # param_remap[orig_pname] = py_pname + + # # Autodetect disable_none if not explicitly specified + # if disable_none is None: + # disable_none = cpp_type.startswith("std::function") + + # if disable_none: + # py_arg = f'py::arg("{py_pname}").none(false)' + # else: + # py_arg = f'py::arg("{py_pname}")' + + # default = default or p.get("default", None) + # if default: + # default = 
self._resolve_default(fn, p, default, cpp_type) + # if not po.disable_type_caster_default_cast: + # default = self._add_default_arg_cast(p, default, cpp_type) + # if default: + # py_arg = f"{py_arg} = {default}" + + # ptype = "in" + + # buflen = buflen_params.pop(p_name, None) + + # if p_name in buffer_params: + # bufinfo = buffer_params.pop(p_name) + # need_lambda = True + # bname = f"__{bufinfo.src}" + # p_const = True + # p_reference = 1 + # p_pointer = 0 + + # call_name = f"({cpp_type}*){bname}.ptr" + # cpp_type = "py::buffer" + + # # this doesn't seem to be true for bytearrays, which is silly + # # x_lambda_pre.append( + # # f'if (PyBuffer_IsContiguous((Py_buffer*){p_name}.ptr(), \'C\') == 0) throw py::value_error("{p_name}: buffer must be contiguous")' + # # ) + + # # TODO: check for dimensions, strides, other dangerous things + + # # bufinfo was validated and converted before it got here + # if bufinfo.type is BufferType.IN: + # ptype = "in" + # lambda_pre += [f"auto {bname} = {p_name}.request(false)"] + # else: + # ptype = "in" + # lambda_pre += [f"auto {bname} = {p_name}.request(true)"] + + # lambda_pre += [f"{bufinfo.len} = {bname}.size * {bname}.itemsize"] + + # if bufinfo.minsz: + # lambda_pre.append( + # f'if ({bufinfo.len} < {bufinfo.minsz}) throw py::value_error("{p_name}: minimum buffer size is {bufinfo.minsz}")' + # ) + + # elif buflen: + # if p_pointer: + # call_name = f"&{buflen.len}" + # ptype = "out" + # else: + # # if it's not a pointer, then the called function + # # can't communicate through it, so ignore the parameter + # need_lambda = True + # call_name = buflen.len + # ptype = "tmp" + + # elif force_out or ( + # (p_pointer or p_reference == 1) and not p_const and fundamental + # ): + # if p_pointer: + # call_name = f"&{call_name}" + # else: + # call_name = p_name + + # ptype = "out" + # elif p["array"]: + # asz = po.array_size or p.get("array_size", 0) + # if asz: + # cpp_type = f"std::array<{cpp_type}, {asz}>" + # call_name = 
f"{call_name}.data()" + # if not default: + # default = "{}" + # else: + # # it's a vector + # pass + # ptype = "out" + + # self._add_type_caster(cpp_type) + + # if p_const: + # cpp_type = f"const {cpp_type}" + + # x_type_full = cpp_type + # x_type_full += "&" * p_reference + # x_type_full += "*" * p_pointer + + # x_decl = f"{x_type_full} {p_name}" + + # pctx = ParamContext( + # arg_name=p_name, + # full_cpp_type=x_type_full, + # cpp_type=cpp_type, + # cpp_type_no_const=cpp_type_no_const, + # default=default, + # decl=x_decl, + # py_arg=py_arg, + # call_name=call_name, + # virtual_call_name=virtual_call_name, + # cpp_retname=cpp_retname, + # const=p_const, + # volatile=bool(p.get("volatile", 0)), + # array=p.get("array"), + # refs=p_reference, + # pointers=p_pointer, + # ) + + # x_all_params.append(pctx) + # if not po.ignore: + # x_filtered_params.append(pctx) + # if ptype == "out": + # need_lambda = True + # out_params.append(pctx) + # x_temps.append(pctx) + + # elif ptype == "in": + # x_in_params.append(pctx) + + # elif ptype == "tmp": + # x_temps.append(pctx) + + # if buffer_params: + # raise ValueError( + # "incorrect buffer param names '%s'" + # % ("', '".join(buffer_params.keys())) + # ) + + # x_return_value_policy = _rvp_map[data.return_value_policy] + + # # Set up the function's name + # if data.rename: + # # user preference wins, of course + # py_name = data.rename + # elif fn["constructor"]: + # py_name = "__init__" + # else: + # # Python exposed function name converted to camelcase + # py_name = self._make_py_name( + # fn["name"], data, is_operator=fn.get("operator", False) + # ) + # if not py_name[:2].isupper(): + # py_name = f"{py_name[0].lower()}{py_name[1:]}" + + # if data.internal or internal: + # py_name = f"_{py_name}" + + # doc_quoted = self._process_doc(fn, data, param_remap=param_remap) + + # # Allow the user to override our auto-detected keepalives + # if data.keepalive is not None: + # keepalives = data.keepalive + + # ref_qualifiers = 
fn.get("ref_qualifiers", "") + + # if not self.report_only: + # if fn["template"]: + # if data.template_impls is None and not data.cpp_code: + # raise ValueError( + # f"{fn['name']}: must specify template impls for function template" + # ) + # else: + # if data.template_impls is not None: + # raise ValueError( + # f"{fn['name']}: cannot specify template_impls for non-template functions" + # ) + + # if data.ignore_pure and not fn["pure_virtual"]: + # raise ValueError( + # f"{fn['name']}: cannot specify ignore_pure for function that isn't pure" + # ) + + # if data.trampoline_cpp_code and (not fn["override"] and not fn["virtual"]): + # raise ValueError( + # f"{fn['name']}: cannot specify trampoline_cpp_code for a non-virtual method" + # ) + + # if data.virtual_xform and (not fn["override"] and not fn["virtual"]): + # raise ValueError( + # f"{fn['name']}: cannot specify virtual_xform for a non-virtual method" + # ) + + # if ref_qualifiers == "&&": + # # pybind11 doesn't support this, user must fix it + # if not data.ignore_py and not data.cpp_code: + # raise ValueError( + # f"{fn['name']}: has && ref-qualifier which cannot be directly bound by pybind11, must specify cpp_code or ignore_py" + # ) + + # # + # # fn_retval is needed for gensig, vcheck assertions + # # - gensig is not computable here + # # + # fn_retval: typing.Optional[str] = None + # if not is_constructor: + # # rtnType and returns are inconsistent in CppHeaderParser's output + # # because sometimes it resolves them, so just do our best for now + + # self._add_type_caster(fn["returns"]) + + # retval = [] + # if fn.get("returns_const"): + # retval.append("const") + # if "returns_enum" in fn: + # retval.append(fn["rtnType"]) + # else: + # retval.append(fn["returns"]) + # if fn["returns_pointer"]: + # retval.append("*") + # if fn["returns_reference"]: + # retval.append("&") + # fn_retval = " ".join(retval) + + # # Lambda generation: + # # - in_params (needed for py::arg generation) + # # - x_lambda stuff + # 
if need_lambda: + # call_start = "" + # lambda_ret = "" + + # # Return all out parameters + # x_rets.extend(out_params) + + # if fn_retval != "void": + # call_start = "auto __ret =" + # x_rets.insert( + # 0, _ReturnParamContext(cpp_retname="__ret", cpp_type=fn_retval) + # ) + + # if len(x_rets) == 1 and x_rets[0].cpp_type != "void": + # lambda_ret = f"return {x_rets[0].cpp_retname};" + # elif len(x_rets) > 1: + # lambda_ret = "return std::make_tuple(%s);" % ",".join( + # [p.cpp_retname for p in x_rets] + # ) + + # # Temporary values to store out parameters in + # if x_temps: + # for out in reversed(x_temps): + # odef = out.default + # if not odef: + # lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}") + # elif odef.startswith("{"): + # lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name}{odef}") + # else: + # lambda_pre.insert(0, f"{out.cpp_type} {out.arg_name} = {odef}") + + # pre = _lambda_predent + f";\n{_lambda_predent}".join(lambda_pre) + ";" + + # genlambda = GeneratedLambda( + # pre=pre, + # call_start=call_start, + # ret=lambda_ret, + # in_params=x_in_params, + # out_params=out_params, + # ) + + # return FunctionContext( + # cpp_name=fn["name"], + # doc=doc_quoted, + # scope_var=scope_var, + # # transforms + # py_name=py_name, + # cpp_return_type=fn_retval, + # all_params=x_all_params, + # filtered_params=x_filtered_params, + # has_buffers=has_buffers, + # keepalives=keepalives, + # return_value_policy=x_return_value_policy, + # # lambda generation + # genlambda=genlambda, + # # info + # const=fn["const"], + # vararg=fn["vararg"], + # ref_qualifiers=ref_qualifiers, + # is_constructor=is_constructor, + # # user settings + # ignore_pure=data.ignore_pure, + # ignore_py=data.ignore_py, + # cpp_code=data.cpp_code, + # trampoline_cpp_code=data.trampoline_cpp_code, + # ifdef=data.ifdef, + # ifndef=data.ifndef, + # release_gil=not data.no_release_gil, + # template_impls=data.template_impls, + # virtual_xform=data.virtual_xform, + # 
is_overloaded=overload_tracker, + # ) + + # def function_hook(self, fn, h2w_data): + # # operators that aren't class members aren't rendered + # if fn.get("operator"): + # return + + # signature = self._get_function_signature(fn) + # data, overload_tracker = self.gendata.get_function_data(fn["name"], signature) + # if data.ignore: + # return + + # scope_var = self._get_module_var(data) + # fctx = self._function_hook(fn, data, scope_var, False, overload_tracker) + # fctx.namespace = fn["namespace"] + # self.hctx.functions.append(fctx) def class_hook(self, cls, h2w_data): # # ignore private classes @@ -993,27 +993,27 @@ def class_hook(self, cls, h2w_data): # f"{cls_name}: cannot specify template_params for non-template class" # ) - has_constructor = False - is_polymorphic = class_data.is_polymorphic - vcheck_fns: typing.List[FunctionContext] = [] + # has_constructor = False + # is_polymorphic = class_data.is_polymorphic + # vcheck_fns: typing.List[FunctionContext] = [] - # bad assumption? yep - if cls["inherits"]: - is_polymorphic = True + # # bad assumption? 
yep + # if cls["inherits"]: + # is_polymorphic = True # Accumulate methods into various lists used by the generator templates - wrapped_public_methods: typing.List[FunctionContext] = [] - wrapped_protected_methods: typing.List[FunctionContext] = [] + # wrapped_public_methods: typing.List[FunctionContext] = [] + # wrapped_protected_methods: typing.List[FunctionContext] = [] # This is: # - methods.public + cls.methods.protected if fn.final # - cls.methods.private if fn.final or fn.override - methods_to_disable: typing.List[FunctionContext] = [] - protected_constructors: typing.List[FunctionContext] = [] - virtual_methods: typing.List[FunctionContext] = [] - non_virtual_protected_methods: typing.List[FunctionContext] = [] + # methods_to_disable: typing.List[FunctionContext] = [] + # protected_constructors: typing.List[FunctionContext] = [] + # virtual_methods: typing.List[FunctionContext] = [] + # non_virtual_protected_methods: typing.List[FunctionContext] = [] for access, methods in ( ("public", wrapped_public_methods), @@ -1021,121 +1021,121 @@ def class_hook(self, cls, h2w_data): ("private", []), ): for fn in cls["methods"][access]: - is_constructor = fn["constructor"] - is_override = fn["override"] - is_virtual = fn["virtual"] or is_override - - has_constructor |= is_constructor - is_polymorphic |= is_virtual - - operator = fn.get("operator") - - # Ignore some operators, move constructors, copy constructors - if ( - (operator and operator not in _operators) - or fn.get("destructor") - or ( - is_constructor - and fn["parameters"] - and fn["parameters"][0]["class"] is cls - ) - or fn["deleted"] - ): - continue - - is_final = fn["final"] - is_private = access == "private" + # is_constructor = fn["constructor"] + # is_override = fn["override"] + # is_virtual = fn["virtual"] or is_override + + # has_constructor |= is_constructor + # is_polymorphic |= is_virtual + + # operator = fn.get("operator") + + # # Ignore some operators, move constructors, copy constructors + # 
if ( + # (operator and operator not in _operators) + # or fn.get("destructor") + # or ( + # is_constructor + # and fn["parameters"] + # and fn["parameters"][0]["class"] is cls + # ) + # or fn["deleted"] + # ): + # continue + + # is_final = fn["final"] + # is_private = access == "private" # this has to be done even on private functions, because # we do overload detection here - signature = self._get_function_signature(fn) - method_data, overload_tracker = self.gendata.get_function_data( - fn["name"], signature, cls_key, class_data, is_private - ) + # signature = self._get_function_signature(fn) + # method_data, overload_tracker = self.gendata.get_function_data( + # fn["name"], signature, cls_key, class_data, is_private + # ) # Have to process private virtual functions too if not is_private or is_virtual or is_final: if method_data.ignore: continue - if operator: - self.hctx.need_operators_h = True - if method_data.no_release_gil is None: - method_data.no_release_gil = True + # if operator: + # self.hctx.need_operators_h = True + # if method_data.no_release_gil is None: + # method_data.no_release_gil = True - internal = access != "public" + # internal = access != "public" - try: - fctx = self._function_hook( - fn, - method_data, - var_name, - internal, - overload_tracker, - ) - except Exception as e: - raise HookError(f"{cls_key}::{fn['name']}") from e + # try: + # fctx = self._function_hook( + # fn, + # method_data, + # var_name, + # internal, + # overload_tracker, + # ) + # except Exception as e: + # raise HookError(f"{cls_key}::{fn['name']}") from e # Update class-specific method attributes - if operator: - fctx.operator = operator - - if fn["static"]: - fctx.is_static_method = True - - if fn["pure_virtual"]: - fctx.is_pure_virtual = True - - # Update method lists - if is_private and is_override: - methods_to_disable.append(fctx) - else: - if is_final: - methods_to_disable.append(fctx) - - # disable virtual method generation for functions with buffer - # 
parameters (doing it correctly is hard, so we skip it) - if is_virtual and not fctx.has_buffers: - virtual_methods.append(fctx) - - if not is_private: - if not fctx.ignore_py: - methods.append(fctx) - - if access == "protected": - if is_constructor: - protected_constructors.append(fctx) - elif not is_virtual: - non_virtual_protected_methods.append(fctx) - - # If the method has cpp_code defined, it must either match the function - # signature of the method, or virtual_xform must be defined with an - # appropriate conversion. If neither of these are true, it will lead - # to difficult to diagnose errors at runtime. We add a static assert - # to try and catch these errors at compile time - need_vcheck = ( - is_virtual - and method_data.cpp_code - and not method_data.virtual_xform - and not method_data.trampoline_cpp_code - and not cls["final"] - and not class_data.force_no_trampoline - ) - if need_vcheck: - vcheck_fns.append(fctx) - self.hctx.has_vcheck = True + # if operator: + # fctx.operator = operator + + # if fn["static"]: + # fctx.is_static_method = True + + # if fn["pure_virtual"]: + # fctx.is_pure_virtual = True + + # # Update method lists + # if is_private and is_override: + # methods_to_disable.append(fctx) + # else: + # if is_final: + # methods_to_disable.append(fctx) + + # # disable virtual method generation for functions with buffer + # # parameters (doing it correctly is hard, so we skip it) + # if is_virtual and not fctx.has_buffers: + # virtual_methods.append(fctx) + + # if not is_private: + # if not fctx.ignore_py: + # methods.append(fctx) + + # if access == "protected": + # if is_constructor: + # protected_constructors.append(fctx) + # elif not is_virtual: + # non_virtual_protected_methods.append(fctx) + + # # If the method has cpp_code defined, it must either match the function + # # signature of the method, or virtual_xform must be defined with an + # # appropriate conversion. 
If neither of these are true, it will lead + # # to difficult to diagnose errors at runtime. We add a static assert + # # to try and catch these errors at compile time + # need_vcheck = ( + # is_virtual + # and method_data.cpp_code + # and not method_data.virtual_xform + # and not method_data.trampoline_cpp_code + # and not cls["final"] + # and not class_data.force_no_trampoline + # ) + # if need_vcheck: + # vcheck_fns.append(fctx) + # self.hctx.has_vcheck = True # If there isn't already a constructor, add a default constructor # - was going to add a FunctionContext for it, but.. that's challenging - add_default_constructor = ( - not has_constructor - and not class_data.nodelete - and not class_data.force_no_default_constructor - ) + # add_default_constructor = ( + # not has_constructor + # and not class_data.nodelete + # and not class_data.force_no_default_constructor + # ) - has_trampoline = ( - is_polymorphic and not cls["final"] and not class_data.force_no_trampoline - ) + # has_trampoline = ( + # is_polymorphic and not cls["final"] and not class_data.force_no_trampoline + # ) public_properties: typing.List[PropContext] = [] protected_properties: typing.List[PropContext] = [] @@ -1144,158 +1144,158 @@ def class_hook(self, cls, h2w_data): ("public", public_properties), ("protected", protected_properties), ): - # cannot bind protected properties without a trampoline, so - # don't bother processing them if there isn't one - if access == "protected" and not has_trampoline: - continue + # # cannot bind protected properties without a trampoline, so + # # don't bother processing them if there isn't one + # if access == "protected" and not has_trampoline: + # continue # class attributes - for v in cls["properties"][access]: - prop_name = v["name"] - propdata = self.gendata.get_cls_prop_data( - prop_name, cls_key, class_data - ) - if propdata.ignore: - continue - self._add_type_caster(v["raw_type"]) - if propdata.rename: - prop_name = propdata.rename - else: - prop_name 
= v["name"] if access == "public" else "_" + v["name"] - - if propdata.access == PropAccess.AUTOMATIC: - # const variables can't be written - if v["constant"] or v["constexpr"]: - prop_readonly = True - # We assume that a struct intentionally has readwrite data - # attributes regardless of type - elif cls["declaration_method"] != "class": - prop_readonly = False - else: - # Properties that aren't fundamental or a reference are readonly unless - # overridden by the hook configuration - prop_readonly = not v["fundamental"] and not v["reference"] - elif propdata.access == PropAccess.READONLY: - prop_readonly = True - else: - prop_readonly = False - - doc = self._process_doc(v, propdata) - - props.append( - PropContext( - py_name=prop_name, - cpp_name=v["name"], - cpp_type=v["type"], - readonly=prop_readonly, - doc=doc, - array_size=v.get("array_size", None), - array=v["array"], - reference=v["reference"], - static=v["static"], - ) - ) - - tctx: typing.Optional[TrampolineData] = None - - if has_trampoline: - tmpl = "" - if template_argument_list: - tmpl = f", {template_argument_list}" - - trampoline_cfg = f"rpygen::PyTrampolineCfg_{cls_cpp_identifier}<{template_argument_list}>" - tname = f"rpygen::PyTrampoline_{cls_cpp_identifier}" - tvar = f"{cls_name}_Trampoline" - - if base_template_params: - tmpl_args = ", ".join(base_template_args) - tmpl_params = ", ".join(base_template_params) - else: - tmpl_args = "" - tmpl_params = "" - - tctx = TrampolineData( - full_cpp_name=tname, - var=tvar, - inline_code=class_data.trampoline_inline_code, - tmpl_args=tmpl_args, - tmpl_params=tmpl_params, - methods_to_disable=methods_to_disable, - virtual_methods=virtual_methods, - protected_constructors=protected_constructors, - non_virtual_protected_methods=non_virtual_protected_methods, - ) - - elif class_data.trampoline_inline_code is not None: - raise HookError( - f"{cls_key} has trampoline_inline_code specified, but there is no trampoline!" 
- ) + # for v in cls["properties"][access]: + # prop_name = v["name"] + # propdata = self.gendata.get_cls_prop_data( + # prop_name, cls_key, class_data + # ) + # if propdata.ignore: + # continue + # self._add_type_caster(v["raw_type"]) + # if propdata.rename: + # prop_name = propdata.rename + # else: + # prop_name = v["name"] if access == "public" else "_" + v["name"] + + # if propdata.access == PropAccess.AUTOMATIC: + # # const variables can't be written + # if v["constant"] or v["constexpr"]: + # prop_readonly = True + # # We assume that a struct intentionally has readwrite data + # # attributes regardless of type + # elif cls["declaration_method"] != "class": + # prop_readonly = False + # else: + # # Properties that aren't fundamental or a reference are readonly unless + # # overridden by the hook configuration + # prop_readonly = not v["fundamental"] and not v["reference"] + # elif propdata.access == PropAccess.READONLY: + # prop_readonly = True + # else: + # prop_readonly = False + + # doc = self._process_doc(v, propdata) + + # props.append( + # PropContext( + # py_name=prop_name, + # cpp_name=v["name"], + # cpp_type=v["type"], + # readonly=prop_readonly, + # doc=doc, + # array_size=v.get("array_size", None), + # array=v["array"], + # reference=v["reference"], + # static=v["static"], + # ) + # ) + + # tctx: typing.Optional[TrampolineData] = None + + # if has_trampoline: + # tmpl = "" + # if template_argument_list: + # tmpl = f", {template_argument_list}" + + # trampoline_cfg = f"rpygen::PyTrampolineCfg_{cls_cpp_identifier}<{template_argument_list}>" + # tname = f"rpygen::PyTrampoline_{cls_cpp_identifier}" + # tvar = f"{cls_name}_Trampoline" + + # if base_template_params: + # tmpl_args = ", ".join(base_template_args) + # tmpl_params = ", ".join(base_template_params) + # else: + # tmpl_args = "" + # tmpl_params = "" + + # tctx = TrampolineData( + # full_cpp_name=tname, + # var=tvar, + # inline_code=class_data.trampoline_inline_code, + # tmpl_args=tmpl_args, + # 
tmpl_params=tmpl_params, + # methods_to_disable=methods_to_disable, + # virtual_methods=virtual_methods, + # protected_constructors=protected_constructors, + # non_virtual_protected_methods=non_virtual_protected_methods, + # ) + + # elif class_data.trampoline_inline_code is not None: + # raise HookError( + # f"{cls_key} has trampoline_inline_code specified, but there is no trampoline!" + # ) # do logic for extracting user defined typealiases here # - these are at class scope, so they can include template - typealias_names = set() - user_typealias = [] - self._extract_typealias(class_data.typealias, user_typealias, typealias_names) + # typealias_names = set() + # user_typealias = [] + # self._extract_typealias(class_data.typealias, user_typealias, typealias_names) # autodetect embedded using directives, but don't override anything # the user specifies # - these are in block scope, so they cannot include templates - auto_typealias = [] - for name, using in cls["using"].items(): - if ( - using["access"] == "public" - and name not in typealias_names - and not using["template"] - and using["using_type"] == "typealias" - ): - auto_typealias.append( - f"using {name} [[maybe_unused]] = typename {cls_qualname}::{name}" - ) - - doc = self._process_doc(cls, class_data) - py_name = self._make_py_name(cls_name, class_data) - - constants = [] - for constant in class_data.constants: - name = constant.split("::")[-1] - constants.append((name, constant)) - - cctx = ClassContext( - parent=parent_ctx, - namespace=cls["namespace"], - cpp_name=cls["name"], - full_cpp_name=cls_qualname, - full_cpp_name_identifier=cls_cpp_identifier, - py_name=py_name, - scope_var=scope_var, - var_name=var_name, - nodelete=class_data.nodelete, - final=cls["final"], - doc=doc, - bases=bases, - trampoline=tctx, - public_properties=public_properties, - protected_properties=protected_properties, - add_default_constructor=add_default_constructor, - wrapped_public_methods=wrapped_public_methods, - 
wrapped_protected_methods=wrapped_protected_methods, - enums=enums, - unnamed_enums=unnamed_enums, - template=template_data, - auto_typealias=auto_typealias, - vcheck_fns=vcheck_fns, - user_typealias=user_typealias, - constants=constants, - inline_code=class_data.inline_code or "", - force_multiple_inheritance=class_data.force_multiple_inheritance, - ) - - # store this to facilitate finding data in parent - cls["class_ctx"] = cctx - - if cctx.parent: - cctx.parent.child_classes.append(cctx) - else: - self.hctx.classes.append(cctx) - if cctx.trampoline: - self.hctx.classes_with_trampolines.append(cctx) + # auto_typealias = [] + # for name, using in cls["using"].items(): + # if ( + # using["access"] == "public" + # and name not in typealias_names + # and not using["template"] + # and using["using_type"] == "typealias" + # ): + # auto_typealias.append( + # f"using {name} [[maybe_unused]] = typename {cls_qualname}::{name}" + # ) + + # doc = self._process_doc(cls, class_data) + # py_name = self._make_py_name(cls_name, class_data) + + # constants = [] + # for constant in class_data.constants: + # name = constant.split("::")[-1] + # constants.append((name, constant)) + + # cctx = ClassContext( + # parent=parent_ctx, + # namespace=cls["namespace"], + # cpp_name=cls["name"], + # full_cpp_name=cls_qualname, + # full_cpp_name_identifier=cls_cpp_identifier, + # py_name=py_name, + # scope_var=scope_var, + # var_name=var_name, + # nodelete=class_data.nodelete, + # final=cls["final"], + # doc=doc, + # bases=bases, + # trampoline=tctx, + # public_properties=public_properties, + # protected_properties=protected_properties, + # add_default_constructor=add_default_constructor, + # wrapped_public_methods=wrapped_public_methods, + # wrapped_protected_methods=wrapped_protected_methods, + # enums=enums, + # unnamed_enums=unnamed_enums, + # template=template_data, + # auto_typealias=auto_typealias, + # vcheck_fns=vcheck_fns, + # user_typealias=user_typealias, + # constants=constants, + # 
inline_code=class_data.inline_code or "", + # force_multiple_inheritance=class_data.force_multiple_inheritance, + # ) + + # # store this to facilitate finding data in parent + # cls["class_ctx"] = cctx + + # if cctx.parent: + # cctx.parent.child_classes.append(cctx) + # else: + # self.hctx.classes.append(cctx) + # if cctx.trampoline: + # self.hctx.classes_with_trampolines.append(cctx) diff --git a/robotpy_build/autowrap/j2_context.py b/robotpy_build/autowrap/j2_context.py index 17ad850a..178e9b0e 100644 --- a/robotpy_build/autowrap/j2_context.py +++ b/robotpy_build/autowrap/j2_context.py @@ -10,9 +10,10 @@ # from dataclasses import dataclass, field +import enum import typing -from cxxheaderparser.types import PQName +from cxxheaderparser.types import Function, PQName from ..config.autowrap_yml import ReturnValuePolicy @@ -80,6 +81,13 @@ class EnumContext: inline_code: typing.Optional[str] +class ParamCategory(enum.Enum): + IGNORE = 0 + OUT = 1 + IN = 2 + TMP = 3 + + @dataclass class ParamContext: """Render data for each parameter""" @@ -88,11 +96,9 @@ class ParamContext: arg_name: str # name of type with const but no *, & + # .. why does this have const cpp_type: str - #: name of type without const, used in trampoline_signature - cpp_type_no_const: str - # contains 'const', &, etc full_cpp_type: str @@ -102,9 +108,6 @@ class ParamContext: #: passed to lambda default: typing.Optional[str] - # type + name - decl: str - #: Name to pass to function when calling the original #: .. 
only used by lambda call_name: str @@ -116,25 +119,21 @@ class ParamContext: #: name when used as an out parameter cpp_retname: str - #: type marked as const - #: -> used by trampoline signature - const: bool = False + #: Not used in jinja template, used for filtering + category: ParamCategory - #: type marked as volatile - #: -> used by trampoline signature - volatile: bool = False - - #: -> used by trampoline signature - array: typing.Optional[int] = None - - # Number of & - #: -> used by trampoline signature - refs: int = 0 - - # Number of * - #: -> used by trampoline signature - pointers: int = 0 + # type + name, rarely used + @property + def decl(self) -> str: + return f"{self.full_cpp_type} {self.arg_name}" + # only used for operator generation, rarely used + @property + def cpp_type_no_const(self) -> str: + ct = self.cpp_type + if ct.startswith("const "): + return ct[6:] + return ct @dataclass class GeneratedLambda: @@ -178,22 +177,9 @@ class FunctionContext: #: every parameter except ignored filtered_params: typing.List[ParamContext] - genlambda: typing.Optional[GeneratedLambda] - - #: Marked const - #: -> used by trampoline signature - const: bool - #: Has vararg parameters #: -> used by trampoline signature - vararg: bool - - #: & or && qualifiers for function - #: -> used by trampoline signature - ref_qualifiers: str - - #: Is this a constructor? - is_constructor: bool + # vararg: bool # # Mixed @@ -235,10 +221,26 @@ class FunctionContext: # OverloadTracker evaluates to True if there are overloads is_overloaded: OverloadTracker + # Used to compute the trampoline signature + _fn: Function + # # Cached/conditionally set properties # + genlambda: typing.Optional[GeneratedLambda] = None + + #: Is this a constructor? 
+ is_constructor: bool = False + + #: & or && qualifiers for function + #: -> used by trampoline signature + # ref_qualifiers: str = "" + + #: Marked const + #: -> used by trampoline signature + # const: bool = False + is_pure_virtual: bool = False #: If there is a namespace associated with this function, this is it, @@ -280,7 +282,7 @@ class BaseClassData: Render data for each base that a class inherits """ - #: C++ name, including namespace/classname + #: C++ name, including all known components full_cpp_name: str # was x_qualname full_cpp_name_w_templates: str # was x_class @@ -411,6 +413,10 @@ class ClassContext: # Everything else # + # Not used in jinja_template + has_constructor: bool = False + is_polymorphic: bool = False + #: was x_has_trampoline trampoline: typing.Optional[TrampolineData] = None @@ -513,7 +519,7 @@ class HeaderContext: # trampolines # template_classes - # - this is a dict instead of a list because of a quirk in Jinja, change + # - TODO this is a dict instead of a list because of a quirk in Jinja, change # it back once we get rid of h2w template_instances: typing.Dict[str, TemplateInstanceContext] = field( default_factory=dict diff --git a/robotpy_build/autowrap/mangle.py b/robotpy_build/autowrap/mangle.py index eccfe30b..c1090aed 100644 --- a/robotpy_build/autowrap/mangle.py +++ b/robotpy_build/autowrap/mangle.py @@ -1,4 +1,18 @@ -from .j2_context import FunctionContext, ParamContext +import typing + +from .j2_context import FunctionContext +from cxxheaderparser.types import ( + Array, + DecoratedType, + FunctionType, + FundamentalSpecifier, + Method, + MoveReference, + NameSpecifier, + Pointer, + Reference, + Type, +) # yes, we include some stdint types in here, but that's fine, this # is just a best effort generator @@ -42,44 +56,80 @@ _type_trans = str.maketrans(_type_bad_chars, "_" * len(_type_bad_chars)) -def _encode_type(param: ParamContext) -> str: - names = [] +def _encode_type(dt: DecoratedType, names: typing.List[str]) -> str: + 
+ # Decode the type + ptrs = 0 + refs = 0 + const = False + volatile = False + + t = dt + while True: + if isinstance(t, Type): + const = const or t.const + volatile = volatile or t.volatile + break + elif isinstance(t, Pointer): + ptrs += 1 + const = const or t.const + volatile = volatile or t.volatile + t = t.ptr_to + elif isinstance(t, Reference): + refs += 1 + t = t.ref_to + elif isinstance(t, MoveReference): + refs += 2 + t = t.moveref_to + else: + break # prefix with cv-qualifiers, refs, pointers - if param.const: + if const: names.append("K") - if param.volatile: + if volatile: names.append("V") - if param.array: - names.append("A" * param.array) + if isinstance(t, Array): + assert False # TODO, convert array size? + names.append("A" * t.size) - refs = param.refs if refs == 1: names.append("R") elif refs == 2: names.append("O") - ptr = param.pointers - if ptr: - names.append("P" * ptr) - - # actual type - typ = _builtins.get(param.cpp_type) - if not typ: - # Only mangle the typename, ignore namespaces as children might have the types - # aliased or something. There are cases where this would fail, but hopefully - # that doesn't happen? - # TODO: do this better - typename = param.full_cpp_type.split("::")[-1] - # assert " " not in raw_type, raw_type - typ = "T" + typename.replace(" ", "").translate(_type_trans) + if ptrs: + names.append("P" * ptrs) + + if isinstance(t, FunctionType): + # encode like a function but include the return type + names.append("F") + _encode_type(t.return_type, names) + params = t.parameters + if not params: + names.append("_v") + else: + for p in params: + names.append("_") + _encode_type(p.type, names) + if t.vararg: + names.append("_z") + else: + typename = t.typename.segments[-1] + if isinstance(typename, (FundamentalSpecifier, NameSpecifier)): + typ = _builtins.get(typename.name) + if not typ: + # .. good enough, there are cases where this would fail, but + # hopefully that doesn't happen? 
+ typ = f"T{typename.name.translate(_type_trans)}" + else: + typ = "T__" - names.append(typ) - return "".join(names) + names.append(typ) -def trampoline_signature(fn: FunctionContext) -> str: +def trampoline_signature(fctx: FunctionContext) -> str: """ In our trampoline functions, each function can be disabled by defining a macro corresponding to the function type. This helper function @@ -94,33 +144,35 @@ def trampoline_signature(fn: FunctionContext) -> str: """ # fast path in case it was computed previously - if fn._trampoline_signature: - return fn._trampoline_signature + if fctx._trampoline_signature: + return fctx._trampoline_signature # TODO: operator overloads names = [] - if fn.const: - names.append("K") - refqual = fn.ref_qualifiers - if refqual: - if refqual == "&": - names.append("R") - if refqual == "&&": - names.append("O") + fn = fctx._fn + if isinstance(fn, Method): + if fn.const: + names.append("K") + refqual = fn.ref_qualifier + if refqual: + if refqual == "&": + names.append("R") + if refqual == "&&": + names.append("O") - names.append(fn.cpp_name) + names.append(fctx.cpp_name) - params = fn.all_params + params = fn.parameters if not params: names.append("_v") else: for p in params: names.append("_") - names.append(_encode_type(p)) + _encode_type(p.type, names) if fn.vararg: names.append("_z") - fn._trampoline_signature = "".join(names) - return fn._trampoline_signature + fctx._trampoline_signature = "".join(names) + return fctx._trampoline_signature diff --git a/robotpy_build/config/autowrap_yml.py b/robotpy_build/config/autowrap_yml.py index d26b4a95..3de8f2bd 100644 --- a/robotpy_build/config/autowrap_yml.py +++ b/robotpy_build/config/autowrap_yml.py @@ -306,17 +306,20 @@ class ClassData(Model): doc_append: Optional[str] = None ignore: bool = False + + #: List of bases to ignore. Name must include any template specializations. ignored_bases: List[str] = [] #: Specify fully qualified names for the bases. 
If the base has a template - #: parameter, you must include it + #: parameter, you must include it. Only needed if it can't be automatically + #: detected directly from the text. base_qualnames: Dict[str, str] = {} attributes: Dict[str, PropData] = {} enums: Dict[str, EnumData] = {} methods: Dict[str, FunctionData] = {} - is_polymorphic: bool = False + is_polymorphic: Optional[bool] = None force_no_trampoline: bool = False force_no_default_constructor: bool = False diff --git a/robotpy_build/generator_data.py b/robotpy_build/generator_data.py index 59d11fd8..78513c72 100644 --- a/robotpy_build/generator_data.py +++ b/robotpy_build/generator_data.py @@ -9,8 +9,10 @@ ) from .autowrap.j2_context import OverloadTracker +from cxxheaderparser.types import Function + import dataclasses -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple @dataclasses.dataclass @@ -19,6 +21,11 @@ class FnReportData: overloads: Dict[str, bool] = dataclasses.field(default_factory=dict) tracker: OverloadTracker = dataclasses.field(default_factory=OverloadTracker) + # need to be put into overloads if reports are being made + deferred_signatures: List[Tuple[Function, bool]] = dataclasses.field( + default_factory=list + ) + AttrMissingData = Dict[str, bool] EnumMissingData = Dict[str, bool] @@ -89,7 +96,7 @@ def get_enum_data(self, name: str) -> EnumData: def get_function_data( self, name: str, - signature: str, + fn: Function, cls_key: Optional[str] = None, cls_data: Optional[ClassData] = None, is_private: bool = False, @@ -109,9 +116,22 @@ def get_function_data( missing = data is None report_data.missing = missing and not is_private + # When retrieving function data, we have to take into account which overload + # is being processed, so that the user can customize each overload uniquely + # if desired + + # most functions don't have overloads, so instead of computing the + # signature each time we defer it until we actually need to use it + if missing: data 
= _default_fn_data + report_data.deferred_signatures.append((fn, is_private)) + elif not data.overloads: + report_data.deferred_signatures.append((fn, True)) else: + # When there is overload data present, we have to actually compute + # the signature of every function + signature = self._get_function_signature(fn) overload = data.overloads.get(signature) missing = overload is None if not missing and overload: @@ -120,8 +140,8 @@ del data["overloads"] data.update(overload.dict(exclude_unset=True)) data = FunctionData(**data) + report_data.overloads[signature] = is_private or not missing - report_data.overloads[signature] = is_private or not missing report_data.tracker.add_overload() return data, report_data.tracker @@ -199,8 +219,14 @@ for fn, fndata in fns.items(): fn = str(fn) overloads = fndata.overloads - overloads_count = len(overloads) + deferred_signatures = fndata.deferred_signatures + overloads_count = len(overloads) + len(deferred_signatures) if overloads_count > 1: + # process each deferred signature + for fn, v in deferred_signatures: + signature = self._get_function_signature(fn) + overloads[signature] = v + has_data = all(overloads.values()) else: has_data = not fndata.missing @@ -227,6 +253,27 @@ return data + def _get_function_signature(self, fn: Function) -> str: + """ + Only includes the types of the parameters and a [const] indicator if needed + """ + + from .autowrap.cxxparser import _fmt_type + signature = ", ".join(_fmt_type(p.type) for p in fn.parameters) + + if getattr(fn, "const", False): + if signature: + signature = f"{signature} [const]" + else: + signature = "[const]" + elif fn.constexpr: + if signature: + signature = f"{signature} [constexpr]" + else: + signature = "[constexpr]" + + return signature + class MissingReporter: def __init__(self):