From d6f4c1b0ed80cd9f62cc6b87f55102c5f5d3095f Mon Sep 17 00:00:00 2001 From: David Leopoldseder Date: Mon, 5 May 2025 09:32:30 +0200 Subject: [PATCH] Add NI support for Arena.ofShared() --- .../api/directives/GraalDirectives.java | 7 + .../src/jdk/graal/compiler/graph/Node.java | 50 + .../jdk/graal/compiler/nodes/FrameState.java | 10 +- .../jdk/graal/compiler/nodes/UnwindNode.java | 6 + .../graphbuilderconf/GraphBuilderContext.java | 21 + .../nodes/java/ExceptionObjectNode.java | 5 + .../StandardGraphBuilderPlugins.java | 35 +- substratevm/CHANGELOG.md | 1 + substratevm/mx.substratevm/suite.py | 13 + .../core/foreign/ForeignFunctionsRuntime.java | 4 +- .../core/foreign/SubstrateForeignUtil.java | 103 ++ .../foreign/SubstrateMappedMemoryUtils.java | 60 + .../core/foreign/SyncCloseScopeOperation.java | 42 + .../Target_java_nio_MappedMemoryUtils.java | 73 + ...dk_internal_foreign_MemorySessionImpl.java | 41 + ..._jdk_internal_misc_ScopedMemoryAccess.java | 189 ++- ..._ScopedMemoryAccess_ScopedAccessError.java | 32 + .../com/oracle/svm/core/ArenaIntrinsics.java | 74 + .../com/oracle/svm/core/SubstrateOptions.java | 9 + .../graal/snippets/SafepointSnippets.java | 13 +- .../oracle/svm/core/nodes/ClusterNode.java | 42 + .../foreign/MemoryArenaValidInScopeNode.java | 106 ++ .../ScopedMemExceptionHandlerClusterNode.java | 258 ++++ .../core/nodes/foreign/ScopedMethodNode.java | 94 ++ .../foreign/ForeignFunctionsFeature.java | 114 +- ...bstrateOptimizeSharedArenaAccessPhase.java | 1227 +++++++++++++++++ .../svm/hosted/NativeImageGenerator.java | 5 + .../oracle/svm/hosted/SharedArenaSupport.java | 65 + .../InlineBeforeAnalysisPolicyUtils.java | 83 +- .../phases/SharedGraphBuilderPhase.java | 308 ++++- .../SubstrateGraphBuilderPlugins.java | 46 +- 31 files changed, 3058 insertions(+), 78 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateForeignUtil.java create mode 100644 
substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateMappedMemoryUtils.java create mode 100644 substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SyncCloseScopeOperation.java create mode 100644 substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_java_nio_MappedMemoryUtils.java create mode 100644 substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_foreign_MemorySessionImpl.java create mode 100644 substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/ArenaIntrinsics.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/ClusterNode.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/MemoryArenaValidInScopeNode.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMemExceptionHandlerClusterNode.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMethodNode.java create mode 100644 substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/phases/SubstrateOptimizeSharedArenaAccessPhase.java create mode 100644 substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SharedArenaSupport.java diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/api/directives/GraalDirectives.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/api/directives/GraalDirectives.java index 55c2c5b63010..b8383ae47f21 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/api/directives/GraalDirectives.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/api/directives/GraalDirectives.java @@ -47,6 +47,13 @@ public final class GraalDirectives { 
public static final double SLOWPATH_PROBABILITY = 0.0001; public static final double FASTPATH_PROBABILITY = 1.0 - SLOWPATH_PROBABILITY; + /** + * Forces a safepoint in the compiled code. + */ + public static void safepoint() { + + } + /** * Directive for the compiler to fall back to the bytecode interpreter at this point. All * arguments to this method must be compile-time constant. diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/graph/Node.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/graph/Node.java index d9c338c7c589..1e7f9615c160 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/graph/Node.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/graph/Node.java @@ -547,6 +547,24 @@ public final boolean hasExactlyOneUsageOfType(InputType inputType) { return numUses == 1; } + /** + * Checks whether {@code this} has any usages of type {@code inputType}. + * + * @param inputType the type of usages to look for + */ + public final boolean hasUsagesOfType(InputType inputType) { + for (Node usage : usages()) { + for (Position pos : usage.inputPositions()) { + if (pos.get(usage) == this) { + if (pos.getInputType() == inputType) { + return true; + } + } + } + } + return false; + } + /** * Adds a given node to this node's {@linkplain #usages() usages}. * @@ -1179,6 +1197,38 @@ public void replaceAtUsages(Node replacement, InputType inputType) { } } + /** + * For each use of {@code this} in another node, {@code n}, replace it with {@code replacement} + * if the type of the use is in {@code inputTypes} and if {@code filter.test(n) == true}. 
+ * + * @see #replaceAtUsages(Node) + */ + public void replaceAtUsages(Node replacement, Predicate filter, InputType inputType) { + checkReplaceWith(replacement); + int i = 0; + int usageCount = this.getUsageCount(); + if (usageCount == 0) { + return; + } + usages: while (i < usageCount) { + Node usage = this.getUsageAt(i); + if (filter.test(usage)) { + for (Position pos : usage.inputPositions()) { + if (pos.getInputType() == inputType && pos.get(usage) == this) { + replaceAtUsagePos(replacement, usage, pos); + this.movUsageFromEndTo(i); + usageCount--; + continue usages; + } + } + } + i++; + } + if (hasNoUsages()) { + maybeNotifyZeroUsages(this); + } + } + /** * For each use of {@code this} in another node, {@code n}, replace it with {@code replacement} * if the type of the use is in {@code inputTypes}. diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/FrameState.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/FrameState.java index 5dcb7cdf3642..6bfe61a69580 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/FrameState.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/FrameState.java @@ -302,9 +302,13 @@ public FrameState(int bci) { * @param bci this must be {@link BytecodeFrame#AFTER_BCI} */ public FrameState(int bci, ValueNode returnValueOrExceptionObject) { - this(null, null, bci, 0, returnValueOrExceptionObject.getStackKind().getSlotCount(), 0, returnValueOrExceptionObject instanceof ExceptionObjectNode ? StackState.Rethrow : StackState.BeforePop, + this(bci, returnValueOrExceptionObject, null, true); + } + + public FrameState(int bci, ValueNode returnValueOrExceptionObject, Bytecode code, boolean checkBCI) { + this(null, code, bci, 0, returnValueOrExceptionObject.getStackKind().getSlotCount(), 0, returnValueOrExceptionObject instanceof ExceptionObjectNode ? 
StackState.Rethrow : StackState.BeforePop, true, null, null); - assert (bci == BytecodeFrame.AFTER_BCI && !rethrowException()) || (bci == BytecodeFrame.AFTER_EXCEPTION_BCI && rethrowException()) : Assertions.errorMessage(bci); + assert !checkBCI || (bci == BytecodeFrame.AFTER_BCI && !rethrowException()) || (bci == BytecodeFrame.AFTER_EXCEPTION_BCI && rethrowException()) : Assertions.errorMessage(bci); ValueNode[] stack = {returnValueOrExceptionObject}; this.values = new NodeInputList<>(this, stack); } @@ -568,7 +572,7 @@ public FrameState duplicateModified(JavaKind popKind, ValueNode pushedValue, List pushedVirtualObjectMappings) { assert pushedValue != null; - assert pushedValue.getStackKind() == popKind : Assertions.errorMessage(pushedValue, popKind, this); + assert pushedValue.getStackKind() == pushedSlotKind : Assertions.errorMessage(pushedValue, popKind, this); return duplicateModified(graph(), bci, stackState, popKind, new JavaKind[]{pushedSlotKind}, new ValueNode[]{pushedValue}, pushedVirtualObjectMappings); } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/UnwindNode.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/UnwindNode.java index 50210f80b72a..600f03abc86e 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/UnwindNode.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/UnwindNode.java @@ -58,4 +58,10 @@ public UnwindNode(ValueNode exception) { public void generate(NodeLIRBuilderTool gen) { gen.getLIRGeneratorTool().emitUnwind(gen.operand(exception())); } + + public void setException(ValueNode exception) { + updateUsages(this.exception, exception); + this.exception = exception; + } + } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/graphbuilderconf/GraphBuilderContext.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/graphbuilderconf/GraphBuilderContext.java index 684315bfb0fe..fc96098c427d 100644 --- 
a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/graphbuilderconf/GraphBuilderContext.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/graphbuilderconf/GraphBuilderContext.java @@ -28,6 +28,8 @@ import static jdk.graal.compiler.core.common.type.StampFactory.objectNonNull; import static jdk.vm.ci.meta.DeoptimizationAction.InvalidateReprofile; +import java.util.List; + import jdk.graal.compiler.bytecode.Bytecode; import jdk.graal.compiler.core.common.type.AbstractPointerStamp; import jdk.graal.compiler.core.common.type.IntegerStamp; @@ -71,6 +73,7 @@ import jdk.graal.compiler.nodes.extended.GuardingNode; import jdk.graal.compiler.nodes.java.InstanceOfDynamicNode; import jdk.graal.compiler.nodes.type.StampTool; +import jdk.internal.misc.ScopedMemoryAccess; import jdk.vm.ci.code.BailoutException; import jdk.vm.ci.meta.Assumptions; import jdk.vm.ci.meta.DeoptimizationAction; @@ -595,4 +598,22 @@ static T setStateAfterIfNecessary(GraphBuilderContext b, T valu default boolean currentBlockCatchesOOME() { return false; } + + /** + * Iff this parsing context is processing a method that is annotated with + * {@link ScopedMemoryAccess} saves the associated session object. + * + * @param scopedMemorySession the currently parsed session of this context + */ + default void setIsParsingScopedMemoryMethod(ValueNode scopedMemorySession) { + // nothing to do + } + + /** + * Determines if the current parsing context has set any scoped memory access that needs to be + * handled. 
+ */ + default List getScopedMemorySessions() { + return null; + } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/java/ExceptionObjectNode.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/java/ExceptionObjectNode.java index 32b53b29ad33..48382a47d0ef 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/java/ExceptionObjectNode.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/nodes/java/ExceptionObjectNode.java @@ -30,6 +30,7 @@ import org.graalvm.word.LocationIdentity; +import jdk.graal.compiler.core.common.type.Stamp; import jdk.graal.compiler.core.common.type.StampFactory; import jdk.graal.compiler.core.common.type.TypeReference; import jdk.graal.compiler.debug.GraalError; @@ -75,6 +76,10 @@ public ExceptionObjectNode(MetaAccessProvider metaAccess) { super(TYPE, StampFactory.objectNonNull(TypeReference.createTrustedWithoutAssumptions(metaAccess.lookupJavaType(Throwable.class)))); } + public ExceptionObjectNode(Stamp s) { + super(TYPE, s); + } + @Override public LocationIdentity getKilledLocationIdentity() { return LocationIdentity.any(); diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/replacements/StandardGraphBuilderPlugins.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/replacements/StandardGraphBuilderPlugins.java index e72faa60b2d2..c97cc0b65155 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/replacements/StandardGraphBuilderPlugins.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/replacements/StandardGraphBuilderPlugins.java @@ -88,6 +88,7 @@ import jdk.graal.compiler.nodes.NamedLocationIdentity; import jdk.graal.compiler.nodes.NodeView; import jdk.graal.compiler.nodes.PiNode; +import jdk.graal.compiler.nodes.SafepointNode; import jdk.graal.compiler.nodes.ProfileData.BranchProbabilityData; import jdk.graal.compiler.nodes.SpinWaitNode; import jdk.graal.compiler.nodes.StateSplit; @@ -720,7 +721,8 @@ 
private static void registerUnsafePlugins(InvocationPlugins plugins, Replacement jdkInternalMiscUnsafe.register(new AllocateUninitializedArrayPlugin("allocateUninitializedArray0", false)); } - private static void registerUnsafeAtomicsPlugins(Registration r, boolean isSunMiscUnsafe, boolean explicitUnsafeNullChecks, String casPrefix, JavaKind[] supportedJavaKinds, + private static void registerUnsafeAtomicsPlugins(Registration r, boolean isSunMiscUnsafe, boolean explicitUnsafeNullChecks, String casPrefix, + JavaKind[] supportedJavaKinds, MemoryOrderMode... memoryOrders) { for (JavaKind kind : supportedJavaKinds) { Class javaClass = getJavaClass(kind); @@ -787,7 +789,8 @@ private static void registerUnsafePlugins0(Registration r, boolean sunMiscUnsafe r.register(new UnsafePutPlugin(kind, explicitUnsafeNullChecks, putName, Receiver.class, Object.class, long.class, javaClass)); // Volatile object-based accesses r.register(new UnsafeGetPlugin(kind, MemoryOrderMode.VOLATILE, explicitUnsafeNullChecks, getName + "Volatile", Receiver.class, Object.class, long.class)); - r.register(new UnsafePutPlugin(kind, MemoryOrderMode.VOLATILE, explicitUnsafeNullChecks, putName + "Volatile", Receiver.class, Object.class, long.class, javaClass)); + r.register(new UnsafePutPlugin(kind, MemoryOrderMode.VOLATILE, explicitUnsafeNullChecks, putName + "Volatile", Receiver.class, Object.class, long.class, + javaClass)); // Ordered object-based accesses if (sunMiscUnsafe) { if (kind == JavaKind.Int || kind == JavaKind.Long || kind == JavaKind.Object) { @@ -1546,11 +1549,13 @@ private static FixedWithNextNode createMemoryAccessNode(StructuredGraph graph, U return nodeConstructor.create(ConstantNode.forLong(0L, graph), OFF_HEAP_LOCATION); } - private void setAccessNodeResult(FixedWithNextNode node, GraphBuilderContext b) { + private FixedWithNextNode setAccessNodeResult(FixedWithNextNode node, GraphBuilderContext b) { if (returnKind != JavaKind.Void) { b.addPush(returnKind, node); + return node; } 
else { b.add(node); + return node; } } @@ -1582,8 +1587,8 @@ protected final void createUnsafeAccess(ValueNode value, GraphBuilderContext b, FixedWithNextNode[] accessNodes = new FixedWithNextNode[]{objectAccess, memoryAccess}; LogicNode condition = graph.addOrUniqueWithInputs(IsNullNode.create(value)); - // We do not know the probability of this being a native memory or object, thus we - // inject 0.5. We still inject it to ensure no code verifying profiles reports + // We do not know the probability of this being a native memory or object, thus + // we inject 0.5. We still inject it to ensure no code verifying profiles reports // missing ones. BranchProbabilityData probability = BranchProbabilityData.injected(0.5, true); IfNode ifNode = b.add(new IfNode(condition, memoryAccess, objectAccess, probability)); @@ -1616,6 +1621,7 @@ protected final void createUnsafeAccess(ValueNode value, GraphBuilderContext b, } b.setStateAfter(merge); } + } } @@ -1651,7 +1657,10 @@ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Rec // Emits a null-check for the otherwise unused receiver unsafe.get(true); // Note that non-ordered raw accesses can be turned into floatable field accesses. 
- UnsafeNodeConstructor unsafeNodeConstructor = (obj, loc) -> new RawLoadNode(obj, offset, unsafeAccessKind, loc, memoryOrder); + UnsafeNodeConstructor unsafeNodeConstructor = (obj, loc) -> { + RawLoadNode rl = new RawLoadNode(obj, offset, unsafeAccessKind, loc, memoryOrder); + return rl; + }; createUnsafeAccess(object, b, unsafeNodeConstructor, RawLoadNode.class); return true; } @@ -1691,7 +1700,10 @@ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Rec // Emits a null-check for the otherwise unused receiver unsafe.get(true); ValueNode maskedValue = b.maskSubWordValue(value, unsafeAccessKind); - createUnsafeAccess(object, b, (obj, loc) -> new RawStoreNode(obj, offset, maskedValue, unsafeAccessKind, loc, true, memoryOrder), RawStoreNode.class); + createUnsafeAccess(object, b, (obj, loc) -> { + RawStoreNode store = new RawStoreNode(obj, offset, maskedValue, unsafeAccessKind, loc, true, memoryOrder); + return store; + }, RawStoreNode.class); return true; } } @@ -1846,6 +1858,15 @@ DeoptimizeNode add(GraphBuilderContext b, DeoptimizationAction action, Deoptimiz private static void registerGraalDirectivesPlugins(InvocationPlugins plugins, SnippetReflectionProvider snippetReflection) { Registration r = new Registration(plugins, GraalDirectives.class); + + r.register(new RequiredInlineOnlyInvocationPlugin("safepoint") { + @Override + public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) { + b.append(new SafepointNode()); + return true; + } + }); + r.register(new DeoptimizePlugin(snippetReflection, None, TransferToInterpreter, false, "deoptimize")); r.register(new DeoptimizePlugin(snippetReflection, InvalidateReprofile, TransferToInterpreter, false, "deoptimizeAndInvalidate")); r.register(new DeoptimizePlugin(snippetReflection, null, null, null, diff --git a/substratevm/CHANGELOG.md b/substratevm/CHANGELOG.md index 07e4b2ddf444..fbcca64fc7d6 100644 --- a/substratevm/CHANGELOG.md +++ 
b/substratevm/CHANGELOG.md @@ -3,6 +3,7 @@ This changelog summarizes major changes to GraalVM Native Image. ## GraalVM for JDK 25 +* (GR-52276) (GR-61959) Add support for Arena.ofShared(). * (GR-58668) Enabled [Whole-Program Sparse Conditional Constant Propagation (WP-SCCP)](https://github.com/oracle/graal/pull/9821) by default, improving the precision of points-to analysis in Native Image. This optimization enhances static analysis accuracy and scalability, potentially reducing the size of the final native binary. * (GR-59313) Deprecated class-level metadata extraction using `native-image-inspect` and removed option `DumpMethodsData`. Use class-level SBOMs instead by passing `--enable-sbom=class-level,export` to the `native-image` builder. The default value of option `IncludeMethodData` was changed to `false`. * (GR-52400) The build process now uses 85% of system memory in containers and CI environments. Otherwise, it tries to only use available memory. If less than 8GB of memory are available, it falls back to 85% of system memory. The reason for the selected memory limit is now also shown in the build resources section of the build output. 
diff --git a/substratevm/mx.substratevm/suite.py b/substratevm/mx.substratevm/suite.py index 719da3dc3308..3e01542c1967 100644 --- a/substratevm/mx.substratevm/suite.py +++ b/substratevm/mx.substratevm/suite.py @@ -365,6 +365,7 @@ "sun.util.calendar", "sun.util.locale.provider", "sun.util.resources", + "jdk.internal.foreign", "jdk.internal.access", "jdk.internal.event", "jdk.internal.loader", @@ -727,6 +728,7 @@ "jdk.internal.loader", "jdk.internal.misc", "jdk.internal.vm.annotation", + "jdk.internal.foreign", "sun.net.www", "sun.reflect.annotation", "sun.security.jca", @@ -787,6 +789,10 @@ ], "requiresConcealed": { "java.base": [ + "jdk.internal.misc", + "jdk.internal.util", + "jdk.internal.access.foreign", + "jdk.internal.vm.vector", "jdk.internal.foreign", "jdk.internal.foreign.abi", "jdk.internal.foreign.abi.x64", @@ -833,6 +839,8 @@ ], "requiresConcealed": { "java.base": [ + "jdk.internal.misc", + "jdk.internal.util", "jdk.internal.foreign", "jdk.internal.foreign.abi", "jdk.internal.foreign.abi.x64.windows", @@ -2518,6 +2526,9 @@ ], "moduleInfo" : { "name" : "org.graalvm.nativeimage.foreign", + "opens" : [ + "com.oracle.svm.core.foreign to org.graalvm.nativeimage.builder" + ], "requires" : [ "org.graalvm.nativeimage.builder", "org.graalvm.collections", @@ -2533,6 +2544,8 @@ "jdk.vm.ci.aarch64", ], "java.base": [ + "jdk.internal.misc", + "jdk.internal.util", "jdk.internal.foreign", "jdk.internal.foreign.abi", "jdk.internal.foreign.abi.x64", diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/ForeignFunctionsRuntime.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/ForeignFunctionsRuntime.java index ec4bf1e8adf4..23f5cfa0cb79 100644 --- a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/ForeignFunctionsRuntime.java +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/ForeignFunctionsRuntime.java @@ -103,13 +103,13 @@ public void 
addDowncallStubPointer(NativeEntryPointInfo nep, CFunctionPointer pt @Platforms(Platform.HOSTED_ONLY.class) public void addUpcallStubPointer(JavaEntryPointInfo jep, CFunctionPointer ptr) { - VMError.guarantee(!upcallStubs.containsKey(jep), "Seems like multiple stubs were generated for " + jep); + VMError.guarantee(!upcallStubs.containsKey(jep), "Seems like multiple stubs were generated for %s", jep); VMError.guarantee(upcallStubs.put(jep, new FunctionPointerHolder(ptr)) == null); } @Platforms(Platform.HOSTED_ONLY.class) public void addDirectUpcallStubPointer(DirectMethodHandleDesc desc, CFunctionPointer ptr) { - VMError.guarantee(!directUpcallStubs.containsKey(desc), "Seems like multiple stubs were generated for " + desc); + VMError.guarantee(!directUpcallStubs.containsKey(desc), "Seems like multiple stubs were generated for %s", desc); VMError.guarantee(directUpcallStubs.put(desc, new FunctionPointerHolder(ptr)) == null); } diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateForeignUtil.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateForeignUtil.java new file mode 100644 index 000000000000..38d7fe7150bb --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateForeignUtil.java @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.foreign; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.ArenaIntrinsics; +import com.oracle.svm.core.SubstrateOptions; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ClusterBeginNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ExceptionInputNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ExceptionPathNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.RegularPathNode; +import com.oracle.svm.util.LogUtils; + +import jdk.graal.compiler.api.directives.GraalDirectives; +import jdk.internal.foreign.MemorySessionImpl; + +/** + * For details on the implementation of shared arenas on substrate see + * {@link Target_jdk_internal_misc_ScopedMemoryAccess}. + * + * Note that this code should only be called by substitutions in + * {@link Target_jdk_internal_misc_ScopedMemoryAccess}. 
+ */ +public class SubstrateForeignUtil { + @AlwaysInline("Compiler expects the code shape") + public static void logExceptionSeen(Throwable e) { + if (SubstrateOptions.printClosedArenaUponThrow()) { + try { + LogUtils.info("Saw an exception in the cluster portion of the graph, message was=" + e.getMessage() + "\n"); + } catch (Throwable t) { + // swallow nothing to do + } + } + } + + @AlwaysInline("Compiler expects the code shape") + public static void checkIdentity(T actual, T expected) { + if (actual != expected) { + throw new IllegalArgumentException("Unexpected object"); + } + } + + @AlwaysInline("factored out only for readability") + public static void checkSession(MemorySessionImpl session) { + if (session != null) { + session.checkValidStateRaw(); + } + } + + /** + * Handles exceptions related to memory sessions within a specific arena scope. + * + * This method checks if the {@link java.lang.foreign.Arena} associated with {@code session} is + * in a valid state. If validation fails, it logs the exception and propagates it through the + * exception path. 
+ * + * @param session the memory session to handle exceptions for + * @param base the base object associated with the arena + * @param offset the offset within the arena + */ + @AlwaysInline("factored out only for readability") + public static void sessionExceptionHandler(MemorySessionImpl session, Object base, long offset) { + long scope = ArenaIntrinsics.checkArenaValidInScope(session, base, offset); + if (scope != 0) { + ClusterBeginNode.beginExceptionCluster(scope); + MemorySessionImpl clusterSession = ExceptionInputNode.clusterInputValue(scope, session); + if (clusterSession != null) { + try { + clusterSession.checkValidStateRaw(); + } catch (Throwable e) { + SubstrateForeignUtil.logExceptionSeen(e); + throw ExceptionPathNode.endClusterExceptionPath(e, scope); + } + } + RegularPathNode.endClusterNormalPath(scope); + } + // avoid any optimization based code duplication in the cluster, it disrupts later matching + // of control flow when searching for this pattern + GraalDirectives.controlFlowAnchor(); + } +} diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateMappedMemoryUtils.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateMappedMemoryUtils.java new file mode 100644 index 000000000000..5695ebb1e0a9 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SubstrateMappedMemoryUtils.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.foreign; + +import com.oracle.svm.core.NeverInline; +import com.oracle.svm.core.util.BasedOnJDKFile; + +public final class SubstrateMappedMemoryUtils { + + /** + * Similar to {@code java.nio.MappedMemoryUtils.load} but does not read a byte from each loaded + * page. This method can therefore safely be called from {@code ScopedMemoryAccess.loadInternal} + * because it never accesses the native memory directly. + * + * OpenJDK's original implementation reads the first byte of each page to ensure that the pages + * will be loaded into physical memory. We don't do so because this may cause unnecessary cache + * invalidations of actually needed memory. Also, we assume that those reads are only beneficial + * on some platforms we don't support anyway. + * + * This method must not be inlined because it does a JNI call and inlining it will most + * certainly exhaust the inlining budget such that other calls that need to be inlined (for + * correctness) cannot be inlined. 
+ */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+14/src/java.base/share/classes/java/nio/MappedMemoryUtils.java#L50-L77") + @NeverInline("contains JNI call") + static void load(long address, boolean isSync, long size) { + // no need to load a sync mapped buffer + if (isSync) { + return; + } + if ((address == 0) || (size == 0)) { + return; + } + long offset = Target_java_nio_MappedMemoryUtils.mappingOffset(address); + long length = Target_java_nio_MappedMemoryUtils.mappingLength(offset, size); + Target_java_nio_MappedMemoryUtils.load0(Target_java_nio_MappedMemoryUtils.mappingAddress(address, offset), length); + } +} diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SyncCloseScopeOperation.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SyncCloseScopeOperation.java new file mode 100644 index 000000000000..8992993dada2 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/SyncCloseScopeOperation.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.foreign; + +import com.oracle.svm.core.heap.VMOperationInfos; +import com.oracle.svm.core.thread.JavaVMOperation; + +final class SyncCloseScopeOperation extends JavaVMOperation { + private final Target_jdk_internal_foreign_MemorySessionImpl session; + + SyncCloseScopeOperation(Target_jdk_internal_foreign_MemorySessionImpl session) { + super(VMOperationInfos.get(SyncCloseScopeOperation.class, "Sync MemorySessionImpl.close", SystemEffect.SAFEPOINT)); + this.session = session; + } + + @Override + protected void operate() { + assert session.state == Target_jdk_internal_foreign_MemorySessionImpl.CLOSED : "Must be closed"; + } +} diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_java_nio_MappedMemoryUtils.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_java_nio_MappedMemoryUtils.java new file mode 100644 index 000000000000..ced814ce992c --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_java_nio_MappedMemoryUtils.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.foreign; + +import java.io.FileDescriptor; + +import com.oracle.svm.core.NeverInline; +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.AnnotateOriginal; +import com.oracle.svm.core.annotate.RecomputeFieldValue; +import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; +import com.oracle.svm.core.annotate.TargetClass; + +import jdk.internal.access.foreign.MappedMemoryUtilsProxy; + +@TargetClass(className = "java.nio.MappedMemoryUtils") +public final class Target_java_nio_MappedMemoryUtils { + @Alias // + @RecomputeFieldValue(isFinal = true, kind = Kind.None) // + static MappedMemoryUtilsProxy PROXY; + + @Alias + static native long mappingOffset(long address); + + @Alias + static native long mappingLength(long mappingOffset, long length); + + @Alias + static native long mappingAddress(long address, long mappingOffset); + + /* + * Methods 'isLoaded', 'unload', and 'force' must not be inlined because they eventually do a + * JNI call and inlining those will most certainly exhaust the inlining budget such that other + * calls that need 
to be inlined (for correctness) cannot be inlined. Then, we will fail the + * verification in SubstrateOptimizeSharedArenaAccessPhase#enumerateScopedAccesses. + */ + @AnnotateOriginal + @NeverInline("avoid inlining of JNI call") + static native boolean isLoaded(long address, boolean isSync, long size); + + @AnnotateOriginal + @NeverInline("avoid inlining of JNI call") + static native void unload(long address, boolean isSync, long size); + + @AnnotateOriginal + @NeverInline("avoid inlining of JNI call") + static native void force(FileDescriptor fd, long address, boolean isSync, long index, long length); + + @Alias + static native void load0(long address, long length); +} diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_foreign_MemorySessionImpl.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_foreign_MemorySessionImpl.java new file mode 100644 index 000000000000..5c8c91d01c51 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_foreign_MemorySessionImpl.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.foreign; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.RecomputeFieldValue; +import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; +import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.jdk.JDKLatest; + +@TargetClass(className = "jdk.internal.foreign.MemorySessionImpl", onlyWith = {JDKLatest.class, ForeignAPIPredicates.Enabled.class}) +final class Target_jdk_internal_foreign_MemorySessionImpl { + @Alias // + int state; + + @Alias // + @RecomputeFieldValue(isFinal = true, kind = Kind.None) // + static int CLOSED; +} diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess.java b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess.java index f2e30655d7e7..4d36f0448230 100644 --- a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess.java +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess.java @@ -24,63 +24,200 @@ */ package com.oracle.svm.core.foreign; -import static com.oracle.svm.core.util.VMError.unsupportedFeature; +import java.io.FileDescriptor; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import 
java.lang.ref.Reference; +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.ArenaIntrinsics; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; import com.oracle.svm.core.annotate.TargetElement; import com.oracle.svm.core.jdk.JDK21OrEarlier; import com.oracle.svm.core.jdk.JDKLatest; +import com.oracle.svm.core.nodes.foreign.MemoryArenaValidInScopeNode; import com.oracle.svm.core.util.BasedOnJDKFile; +import jdk.internal.access.foreign.MappedMemoryUtilsProxy; import jdk.internal.foreign.MemorySessionImpl; +import jdk.internal.misc.ScopedMemoryAccess; +import jdk.internal.misc.ScopedMemoryAccess.ScopedAccessError; /** - * Gracefully handle unsupported features. + * Support for shared arenas on SVM: + *

+ * Shared arenas are implemented at safepoints in SVM. A shared arena can only be closed at a + * safepoint, ensuring that any other thread using the closed arena will only see the closure at a + * safepoint. To achieve this, SVM relies on compiler support. When accessing scoped memory (marked + * with {@link MemoryArenaValidInScopeNode}), all dominated accesses within that scope will check + * for closed arenas. + *

+ * To avoid eager creation of scope checks on all usages, we introduce validity checks + * ({@link MemorySessionImpl#checkValidStateRaw()}) at points guaranteed to be followed by a + * safepoint. For proper exception control flow when accessing a closed session, we use + * substitutions of methods in {@link ScopedMemoryAccess}. Each scoped access is substituted with + * the pattern + * {@link SubstrateForeignUtil#sessionExceptionHandler(MemorySessionImpl, Object, long)}, which + * marks the entry of a scoped region. This code must dominate all other code in the scoped method; + * otherwise, dominance problems may occur. Note that this constraint is not enforced, and modifying + * the generated code without understanding the implications may render it unschedulable. + *

+ * The {@link ArenaIntrinsics#checkArenaValidInScope(MemorySessionImpl, Object, long)} call signals + * the compiler to check all naturally dominated memory accesses and kills. Every such access will + * then be checked for session validity, ensuring that all session objects requiring scope checks + * are properly validated. + *

+ * The logic for code duplication, dominance, and node state checking is implemented in + * {@link com.oracle.svm.hosted.foreign.phases.SubstrateOptimizeSharedArenaAccessPhase}. + *

+ * Two assumptions are made about the @Scoped-annotated methods in the original JDK class: + *

    + *
  1. They must follow a specific code pattern (see + * {@code com.oracle.svm.hosted.phases.SharedGraphBuilderPhase.SharedBytecodeParser#instrumentScopedMethod}). + * If the pattern changes, consider adding an explicit substitution or adapting the parser.
  2. + *
  3. The actual operation using the native address (e.g., {@code Unsafe.setMemory}) will be + * recursively inlined, allowing the compiler phase to see all safepoints. However, some calls may + * remain (e.g., due to explicit @NeverInline annotations). These remaining calls must not access + * the native memory associated with the session. As this is verified, such remaining calls must be + * explicitly allowed by registering them in + * {@code com.oracle.svm.hosted.foreign.ForeignFunctionsFeature#initSafeArenaAccessors}.
  4. + *
+ * + * @noinspection CaughtExceptionImmediatelyRethrown */ @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-24+15/src/java.base/share/classes/jdk/internal/misc/X-ScopedMemoryAccess-bin.java.template") @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+13/src/java.base/share/classes/jdk/internal/misc/X-ScopedMemoryAccess.java.template") @TargetClass(className = "jdk.internal.misc.ScopedMemoryAccess", onlyWith = ForeignAPIPredicates.Enabled.class) public final class Target_jdk_internal_misc_ScopedMemoryAccess { + @Substitute static void registerNatives() { } + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+20/src/java.base/share/classes/java/nio/MappedMemoryUtils.java#L50-L77") + @SuppressWarnings("static-method") + @Substitute + @Target_jdk_internal_misc_ScopedMemoryAccess_Scoped + @TargetElement(onlyWith = JDKLatest.class) + @AlwaysInline("Safepoints must be visible in caller") + public void loadInternal(MemorySessionImpl session, MappedMemoryUtilsProxy mappedUtils, long address, boolean isSync, long size) { + SubstrateForeignUtil.checkIdentity(mappedUtils, Target_java_nio_MappedMemoryUtils.PROXY); + try { + SubstrateForeignUtil.sessionExceptionHandler(session, null, address); + try { + SubstrateForeignUtil.checkSession(session); + SubstrateMappedMemoryUtils.load(address, isSync, size); + } finally { + Reference.reachabilityFence(session); + } + } catch (ScopedAccessError e) { + throw e; + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+14/src/java.base/share/classes/java/nio/MappedMemoryUtils.java#L182-L185") + @SuppressWarnings("static-method") + @Substitute + @Target_jdk_internal_misc_ScopedMemoryAccess_Scoped + @TargetElement(onlyWith = JDKLatest.class) + @AlwaysInline("Safepoints must be visible in caller") + public boolean isLoadedInternal(MemorySessionImpl session, MappedMemoryUtilsProxy mappedUtils, long address, boolean isSync, long size) { + SubstrateForeignUtil.checkIdentity(mappedUtils, 
Target_java_nio_MappedMemoryUtils.PROXY); + try { + SubstrateForeignUtil.sessionExceptionHandler(session, null, address); + try { + SubstrateForeignUtil.checkSession(session); + // originally: 'mappedUtils.isLoaded(address, isSync, size)' + return Target_java_nio_MappedMemoryUtils.isLoaded(address, isSync, size); + } finally { + Reference.reachabilityFence(session); + } + } catch (ScopedAccessError e) { + throw e; + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+14/src/java.base/share/classes/java/nio/MappedMemoryUtils.java#L192-L195") + @SuppressWarnings("static-method") + @Substitute + @Target_jdk_internal_misc_ScopedMemoryAccess_Scoped + @TargetElement(onlyWith = JDKLatest.class) + @AlwaysInline("Safepoints must be visible in caller") + public void unloadInternal(MemorySessionImpl session, MappedMemoryUtilsProxy mappedUtils, long address, boolean isSync, long size) { + SubstrateForeignUtil.checkIdentity(mappedUtils, Target_java_nio_MappedMemoryUtils.PROXY); + try { + SubstrateForeignUtil.sessionExceptionHandler(session, null, address); + try { + SubstrateForeignUtil.checkSession(session); + + // originally: 'mappedUtils.unload(address, isSync, size)' + Target_java_nio_MappedMemoryUtils.unload(address, isSync, size); + } finally { + Reference.reachabilityFence(session); + } + } catch (ScopedAccessError e) { + throw e; + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+14/src/java.base/share/classes/java/nio/MappedMemoryUtils.java#L197-L200") + @SuppressWarnings("static-method") + @Substitute + @Target_jdk_internal_misc_ScopedMemoryAccess_Scoped + @TargetElement(onlyWith = JDKLatest.class) + @AlwaysInline("Safepoints must be visible in caller") + public void forceInternal(MemorySessionImpl session, MappedMemoryUtilsProxy mappedUtils, FileDescriptor fd, long address, boolean isSync, long index, long length) { + SubstrateForeignUtil.checkIdentity(mappedUtils, Target_java_nio_MappedMemoryUtils.PROXY); + try { + 
SubstrateForeignUtil.sessionExceptionHandler(session, null, address); + try { + SubstrateForeignUtil.checkSession(session); + + // originally: 'mappedUtils.force(fd, address, isSync, index, length);' + Target_java_nio_MappedMemoryUtils.force(fd, address, isSync, index, length); + } finally { + Reference.reachabilityFence(session); + } + } catch (ScopedAccessError e) { + throw e; + } + } + /** - * Performs a thread-local handshake - * - *
-     * {@code
-     * JVM_ENTRY(jboolean, ScopedMemoryAccess_closeScope(JNIEnv *env, jobject receiver, jobject deopt, jobject exception))
-     *   CloseScopedMemoryClosure cl(deopt, exception);
-     *   Handshake::execute(&cl);
-     *   return !cl._found;
-     * JVM_END
-     * }
-     * 
- * - * CloseScopedMemoryClosure can be summarised as follows: Each thread checks the - * last max_critical_stack_depth (fixed to 10) frames of its own stack trace. If it - * contains any @Scoped-annotated method called on the sessions being freed, it - * sets _found to true. + * This method synchronizes with all other Java threads in order to be able to safely close the + * session. *

- * See scopedMemoryAccess.cpp in HotSpot. + * On HotSpot, a thread-local handshake (i.e. {@code CloseScopedMemoryClosure}) with all other + * Java threads is performed. {@code CloseScopedMemoryClosure} can be summarised as follows: + * Each thread checks the last max_critical_stack_depth (fixed to 10) virtual + * frames of its own stack trace. If it contains any @Scoped-annotated method + * called on the sessions being freed, it installs an async exception (a ScopedAccessError + * provided as argument). *

- * As one might notice, what is not supported is not creating shared arenas, but closing them. + * In our case, we will force a safepoint to synchronize all threads. The VM operation (i.e. + * {@link SyncCloseScopeOperation}) is essentially an empty operation but kills the field + * location of {@link Target_jdk_internal_foreign_MemorySessionImpl#state}. */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+20/src/hotspot/share/prims/scopedMemoryAccess.cpp#L215-L218") + @SuppressWarnings("static-method") @Substitute @TargetElement(onlyWith = JDKLatest.class) - void closeScope0(MemorySessionImpl session, Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError error) { - throw unsupportedFeature("GR-52276: Arena.ofShared not supported"); + void closeScope0(Target_jdk_internal_foreign_MemorySessionImpl session, @SuppressWarnings("unused") Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError error) { + new SyncCloseScopeOperation(session).enqueue(); } + @SuppressWarnings("static-method") @Substitute @TargetElement(onlyWith = JDK21OrEarlier.class) - boolean closeScope0(MemorySessionImpl session) { - throw unsupportedFeature("GR-52276: Arena.ofShared not supported"); + boolean closeScope0(Target_jdk_internal_foreign_MemorySessionImpl session) { + new SyncCloseScopeOperation(session).enqueue(); + return true; } } -@TargetClass(className = "jdk.internal.misc.ScopedMemoryAccess$ScopedAccessError", onlyWith = {JDKLatest.class, ForeignAPIPredicates.Enabled.class}) -final class Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError { +@Retention(RetentionPolicy.RUNTIME) +@TargetClass(className = "jdk.internal.misc.ScopedMemoryAccess$Scoped", onlyWith = {JDKLatest.class, ForeignAPIPredicates.Enabled.class}) +@interface Target_jdk_internal_misc_ScopedMemoryAccess_Scoped { + } diff --git a/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError.java 
b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError.java new file mode 100644 index 000000000000..6732476dc417 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.foreign/src/com/oracle/svm/core/foreign/Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.foreign; + +import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.jdk.JDKLatest; + +@TargetClass(className = "jdk.internal.misc.ScopedMemoryAccess$ScopedAccessError", onlyWith = {JDKLatest.class, ForeignAPIPredicates.Enabled.class}) +final class Target_jdk_internal_misc_ScopedMemoryAccess_ScopedAccessError { +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/ArenaIntrinsics.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/ArenaIntrinsics.java new file mode 100644 index 000000000000..05d70cad492e --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/ArenaIntrinsics.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core; + +import com.oracle.svm.core.nodes.foreign.MemoryArenaValidInScopeNode; + +import jdk.internal.foreign.MemorySessionImpl; + +/** + * Intrinsification happening in SubstrateGraphBuilderPlugins when the method calls to these callees + * are parsed. + */ +public class ArenaIntrinsics { + + /** + * Checks if the provided memory session is valid within its current scope. + * + * This method serves as a compiler intrinsic to ensure that dominated code accessing memory + * arenas/sessions adheres to the semantics of shared arenas. It allows the compiler to + * guarantee proper checks for closed shared arenas in concurrent access scenarios. + * + * To utilize this method effectively, follow the recommended pattern: + * + *

+     * if (checkValidArenaInScope(session, base, offset) != 0) {
+     *     checkArena();
+     * }
+     * 
+ * + * The returned numeric value indicates whether the associated intrinsic + * {@link MemoryArenaValidInScopeNode} is present in the graph. A non-zero value signifies its + * presence, whereas zero denotes its absence. Note that the actual values hold no significance + * and should not be interpreted directly. Instead, they facilitate modeling data dependencies + * during compilation in IR. Once the optimizer completes processing scoped memory accesses, the + * value is replaced with a constant zero, enabling easy cleanup and folding of dependent + * control flow. + * + * If the arena scope is removed from the graph, the compiler sets the arena value to a constant + * zero, resulting in the removal of dominated code. + * + * @param session the memory session to validate + * @param base the base object associated with the memory session + * @param offset the offset within the base object + * @return a numeric value indicating the validity of the memory session within its scope + */ + @SuppressWarnings("unused") + public static long checkArenaValidInScope(MemorySessionImpl session, Object base, long offset) { + // will be intrinsifed to have an exception branch + return 0; + } + +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java index 03cd84231bd7..5f2dff7adf90 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java @@ -1426,4 +1426,13 @@ public static class TruffleStableOptions { 2. All @CEntryPoint definitions in classes loaded by the custom loader are processed. 3. 
All @TargetClass substitutions in classes loaded by the custom loader are processed.""")// public static final HostedOptionKey LibGraalClassLoader = new HostedOptionKey<>(""); + + @Option(help = "Flag indicating if the code checking for closed arenas should print to stdout when it sees an exception.", type = OptionType.Debug)// + public static final HostedOptionKey PrintClosedArenaUponThrow = new HostedOptionKey<>(false); + + @Fold + public static boolean printClosedArenaUponThrow() { + return PrintClosedArenaUponThrow.getValue(); + } + } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SafepointSnippets.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SafepointSnippets.java index 5963e504addc..7922e03f0a9b 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SafepointSnippets.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SafepointSnippets.java @@ -39,6 +39,7 @@ import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; import com.oracle.svm.core.meta.SharedMethod; import com.oracle.svm.core.nodes.SafepointCheckNode; +import com.oracle.svm.core.nodes.foreign.MemoryArenaValidInScopeNode; import com.oracle.svm.core.thread.SafepointCheckCounter; import com.oracle.svm.core.thread.SafepointSlowpath; @@ -49,6 +50,7 @@ import jdk.graal.compiler.graph.Node.ConstantNodeParameter; import jdk.graal.compiler.graph.Node.NodeIntrinsic; import jdk.graal.compiler.nodeinfo.Verbosity; +import jdk.graal.compiler.nodes.FieldLocationIdentity; import jdk.graal.compiler.nodes.SafepointNode; import jdk.graal.compiler.nodes.extended.BranchProbabilityNode; import jdk.graal.compiler.nodes.extended.ForeignCallNode; @@ -59,14 +61,16 @@ import jdk.graal.compiler.replacements.SnippetTemplate.Arguments; import jdk.graal.compiler.replacements.SnippetTemplate.SnippetInfo; import jdk.graal.compiler.replacements.Snippets; +import 
jdk.vm.ci.meta.ResolvedJavaField; public final class SafepointSnippets extends SubstrateTemplates implements Snippets { + private final SnippetInfo safepoint; SafepointSnippets(OptionValues options, Providers providers, Map, NodeLoweringProvider> lowerings) { super(options, providers); - this.safepoint = snippet(providers, SafepointSnippets.class, "safepointSnippet", getKilledLocations()); + this.safepoint = snippet(providers, SafepointSnippets.class, "safepointSnippet", getKilledLocations(providers.getMetaAccess().lookupJavaField(MemoryArenaValidInScopeNode.STATE_FIELD))); lowerings.put(SafepointNode.class, new SafepointLowering()); } @@ -78,10 +82,11 @@ private static void safepointSnippet() { } } - private static LocationIdentity[] getKilledLocations() { - int newLength = GC_LOCATIONS.length + 1; + private static LocationIdentity[] getKilledLocations(ResolvedJavaField memorySessionImplStateField) { + int newLength = GC_LOCATIONS.length + 2; LocationIdentity[] locations = Arrays.copyOf(GC_LOCATIONS, newLength); - locations[newLength - 1] = SafepointCheckCounter.getLocationIdentity(); + locations[newLength - 2] = SafepointCheckCounter.getLocationIdentity(); + locations[newLength - 1] = new FieldLocationIdentity(memorySessionImplStateField); return locations; } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/ClusterNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/ClusterNode.java new file mode 100644 index 000000000000..75793ff90867 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/ClusterNode.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.nodes; + +import jdk.graal.compiler.nodes.FixedNode; +import jdk.graal.compiler.nodes.StructuredGraph; +import jdk.graal.compiler.nodes.debug.ControlFlowAnchored; + +/** + * Interface for all nodes that are used to mark graph clusters. A graph cluster is a given part of + * a {@link StructuredGraph}. It's a loose definition of a set of nodes interconnected via fixed + * nodes. Clusters typically have entry nodes that mark the beginning of a cluster and exit nodes + * that mark the end of a cluster. All {@link FixedNode} in between are considered part of a + * cluster. All floating nodes which have inputs (transitively) that are inside the cluster are part + * of the cluster. Clusters can be used to duplicate certain parts of a graph and insert them at + * other positions. 
+ */ +public interface ClusterNode extends ControlFlowAnchored { + void delete(); +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/MemoryArenaValidInScopeNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/MemoryArenaValidInScopeNode.java new file mode 100644 index 000000000000..597023197131 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/MemoryArenaValidInScopeNode.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2019, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.nodes.foreign; + +import static jdk.graal.compiler.nodeinfo.InputType.Memory; + +import java.lang.reflect.Field; + +import org.graalvm.word.LocationIdentity; + +import com.oracle.svm.core.nodes.ClusterNode; +import com.oracle.svm.util.ReflectionUtil; + +import jdk.graal.compiler.core.common.type.StampFactory; +import jdk.graal.compiler.graph.Node; +import jdk.graal.compiler.graph.NodeClass; +import jdk.graal.compiler.graph.NodeInputList; +import jdk.graal.compiler.nodeinfo.NodeCycles; +import jdk.graal.compiler.nodeinfo.NodeInfo; +import jdk.graal.compiler.nodeinfo.NodeSize; +import jdk.graal.compiler.nodes.ConstantNode; +import jdk.graal.compiler.nodes.FixedWithNextNode; +import jdk.graal.compiler.nodes.ValueNode; +import jdk.graal.compiler.nodes.debug.ControlFlowAnchored; +import jdk.graal.compiler.nodes.memory.MemoryAccess; +import jdk.graal.compiler.nodes.memory.MemoryKill; +import jdk.internal.foreign.MemorySessionImpl; +import jdk.internal.misc.ScopedMemoryAccess; + +/** + * See {@link ClusterNode} for details. + * + * Mark the beginning of a {@link ScopedMemoryAccess} checking the validity of a memory session. 
+ */ +@NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN) +public class MemoryArenaValidInScopeNode extends FixedWithNextNode implements MemoryAccess, ControlFlowAnchored { + public static final Field STATE_FIELD = ReflectionUtil.lookupField(MemorySessionImpl.class, "state"); + + public static final NodeClass TYPE = NodeClass.create(MemoryArenaValidInScopeNode.class); + @Node.OptionalInput ValueNode value; + private final LocationIdentity fieldLocation; + @OptionalInput(Memory) MemoryKill lastLocationAccess; + + public MemoryArenaValidInScopeNode(ValueNode value, LocationIdentity fieldLocation) { + super(TYPE, StampFactory.forInteger(64)); + this.value = value; + this.fieldLocation = fieldLocation; + } + + @OptionalInput NodeInputList scopeAssociatedValues; + + public void addScopeAssociatedValue(ValueNode memorySession) { + if (this.scopeAssociatedValues == null) { + this.scopeAssociatedValues = new NodeInputList<>(this); + } + this.scopeAssociatedValues.add(memorySession); + } + + public ValueNode getValue() { + return value; + } + + public void delete(int val) { + this.replaceAtUsages(ConstantNode.forLong(val, graph())); + this.graph().removeFixed(this); + } + + @Override + public MemoryKill getLastLocationAccess() { + return lastLocationAccess; + } + + @Override + public void setLastLocationAccess(MemoryKill lla) { + updateUsagesInterface(lastLocationAccess, lla); + lastLocationAccess = lla; + } + + @Override + public LocationIdentity getLocationIdentity() { + return fieldLocation; + } + +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMemExceptionHandlerClusterNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMemExceptionHandlerClusterNode.java new file mode 100644 index 000000000000..60f6f79d3243 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMemExceptionHandlerClusterNode.java @@ -0,0 +1,258 @@ 
+/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.nodes.foreign; + +import static jdk.graal.compiler.nodeinfo.InputType.Memory; + +import org.graalvm.word.LocationIdentity; + +import com.oracle.svm.core.nodes.ClusterNode; + +import jdk.graal.compiler.core.common.type.Stamp; +import jdk.graal.compiler.core.common.type.StampFactory; +import jdk.graal.compiler.graph.Node; +import jdk.graal.compiler.graph.NodeClass; +import jdk.graal.compiler.nodeinfo.InputType; +import jdk.graal.compiler.nodeinfo.NodeCycles; +import jdk.graal.compiler.nodeinfo.NodeInfo; +import jdk.graal.compiler.nodeinfo.NodeSize; +import jdk.graal.compiler.nodes.FixedWithNextNode; +import jdk.graal.compiler.nodes.NodeView; +import jdk.graal.compiler.nodes.StructuredGraph; +import jdk.graal.compiler.nodes.ValueNode; +import jdk.graal.compiler.nodes.debug.SideEffectNode; +import jdk.graal.compiler.nodes.memory.MemoryAccess; +import jdk.graal.compiler.nodes.memory.MemoryKill; +import jdk.graal.compiler.nodes.memory.SingleMemoryKill; +import jdk.graal.compiler.nodes.spi.LIRLowerable; +import jdk.graal.compiler.nodes.spi.NodeLIRBuilderTool; +import jdk.graal.compiler.nodes.util.GraphUtil; +import jdk.internal.foreign.MemorySessionImpl; +import jdk.internal.misc.ScopedMemoryAccess; +import jdk.internal.misc.ScopedMemoryAccess.ScopedAccessError; + +@NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN) +public abstract class ScopedMemExceptionHandlerClusterNode extends FixedWithNextNode implements ClusterNode { + public static final NodeClass TYPE = NodeClass.create(ScopedMemExceptionHandlerClusterNode.class); + + public ScopedMemExceptionHandlerClusterNode(NodeClass c, Stamp stamp) { + super(c, stamp); + } + + /** + * See {@link ClusterNode} for details. + * + * Mark the beginning of the non exception path of the exception cluster for a + * {@link ScopedMemoryAccess} checking the validity of a memory session. 
+ */ + @NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN) + public static class RegularPathNode extends ScopedMemExceptionHandlerClusterNode implements LIRLowerable { + + public static final NodeClass TYPE = NodeClass.create(RegularPathNode.class); + @Node.OptionalInput ValueNode scope; + + public RegularPathNode(ValueNode scope) { + super(TYPE, StampFactory.forVoid()); + this.scope = scope; + } + + public ValueNode getScope() { + return scope; + } + + @Override + public void generate(NodeLIRBuilderTool generator) { + // nothing to do + } + + @NodeIntrinsic + public static native void endClusterNormalPath(long scope); + + @Override + public void delete() { + GraphUtil.unlinkFixedNode(this); + this.safeDelete(); + } + } + + /** + * See {@link ClusterNode} for details. + * + * Mark the inputs of an exception cluster for a {@link ScopedMemoryAccess} checking the + * validity of a memory session. + */ + @NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN) + public static class ExceptionInputNode extends ScopedMemExceptionHandlerClusterNode implements LIRLowerable { + + public static final NodeClass TYPE = NodeClass.create(ExceptionInputNode.class); + + /** + * A link to the {@link MemoryArenaValidInScopeNode} that allows easy access to the scoped + * access dominating this exception handler. + */ + @Node.OptionalInput ValueNode scope; + /** + * A link to the {@code MemorySessionImpl} associated with the scoped access. 
+ */ + @Node.OptionalInput ValueNode input; + + public ExceptionInputNode(ValueNode scope, ValueNode input) { + super(TYPE, input.stamp(NodeView.DEFAULT)); + this.input = input; + this.scope = scope; + } + + public ValueNode getInput() { + return input; + } + + @Override + public void generate(NodeLIRBuilderTool generator) { + // nothing to do + } + + @NodeIntrinsic + public static native MemorySessionImpl clusterInputValue(long scope, MemorySessionImpl value); + + @Override + public void delete() { + this.replaceAtUsages(input); + GraphUtil.unlinkFixedNode(this); + this.safeDelete(); + } + } + + /** + * See {@link ClusterNode} for details. + * + * Mark the beginning of the exception handler of the exception cluster for a + * {@link ScopedMemoryAccess} checking the validity of a memory session. + */ + @NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN, allowedUsageTypes = InputType.Memory) + public static class ExceptionPathNode extends ScopedMemExceptionHandlerClusterNode implements LIRLowerable, SingleMemoryKill, MemoryAccess { + + public static final NodeClass TYPE = NodeClass.create(ExceptionPathNode.class); + + @Node.OptionalInput ValueNode exception; + @Node.OptionalInput ValueNode scope; + + public ExceptionPathNode(ValueNode exception, ValueNode scope) { + super(TYPE, exception.stamp(NodeView.DEFAULT)); + this.exception = exception; + this.scope = scope; + } + + public ValueNode getException() { + return exception; + } + + public ValueNode getScope() { + return scope; + } + + @Override + public void generate(NodeLIRBuilderTool generator) { + // nothing to do + } + + @NodeIntrinsic + public static native ScopedAccessError endClusterExceptionPath(Throwable t, long scopeNode); + + @Override + public void delete() { + if (hasUsagesOfType(InputType.Memory) && lastLocationAccess == null) { + /* + * We might not know the next dominating kill of ANY. 
Do not bother finding it, add + * an artificial kill, we are in the exception path, perf is not relevant here but + * graph shape for correctness is. + */ + StructuredGraph g = graph(); + SideEffectNode sf = g.addWithoutUnique(new SideEffectNode()); + g.addBeforeFixed(this, sf); + replaceAtUsages(sf, Memory); + } + replaceAtUsages(exception); + GraphUtil.unlinkFixedNode(this); + this.safeDelete(); + } + + @Override + public LocationIdentity getKilledLocationIdentity() { + return LocationIdentity.ANY_LOCATION; + } + + @Override + public LocationIdentity getLocationIdentity() { + return getKilledLocationIdentity(); + } + + @OptionalInput(Memory) MemoryKill lastLocationAccess; + + @Override + public MemoryKill getLastLocationAccess() { + return lastLocationAccess; + } + + @Override + public void setLastLocationAccess(MemoryKill lla) { + updateUsagesInterface(lastLocationAccess, lla); + lastLocationAccess = lla; + } + + } + + /** + * See {@link ClusterNode} for details. + * + * Mark the beginning of an exception cluster for a {@link ScopedMemoryAccess} checking the + * validity of a memory session. 
+ */ + @NodeInfo(cycles = NodeCycles.CYCLES_UNKNOWN, size = NodeSize.SIZE_UNKNOWN) + public static class ClusterBeginNode extends ScopedMemExceptionHandlerClusterNode implements LIRLowerable { + + public static final NodeClass TYPE = NodeClass.create(ClusterBeginNode.class); + + @Node.OptionalInput ValueNode scope; + + public ClusterBeginNode(ValueNode scope) { + super(TYPE, StampFactory.forVoid()); + this.scope = scope; + } + + @Override + public void generate(NodeLIRBuilderTool generator) { + // nothing to do + } + + @NodeIntrinsic + public static native void beginExceptionCluster(long scope); + + @Override + public void delete() { + GraphUtil.unlinkFixedNode(this); + this.safeDelete(); + } + } +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMethodNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMethodNode.java new file mode 100644 index 000000000000..44cc9bf33cef --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/nodes/foreign/ScopedMethodNode.java @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2019, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.nodes.foreign; + +import com.oracle.svm.core.nodes.ClusterNode; + +import jdk.graal.compiler.core.common.type.StampFactory; +import jdk.graal.compiler.graph.Node; +import jdk.graal.compiler.graph.NodeClass; +import jdk.graal.compiler.nodeinfo.InputType; +import jdk.graal.compiler.nodeinfo.NodeCycles; +import jdk.graal.compiler.nodeinfo.NodeInfo; +import jdk.graal.compiler.nodeinfo.NodeSize; +import jdk.graal.compiler.nodeinfo.Verbosity; +import jdk.graal.compiler.nodes.FixedWithNextNode; +import jdk.graal.compiler.nodes.util.GraphUtil; + +/** + * Describes a {@code jdk.internal.misc.ScopedMemoryAccess$Scoped} function scope. Used to mark the + * beginning and end of such a function. Enables the compiler to perform certain verification for + * exactly the scope of the {@code Scope} annotated method even in the presence of inlining. 
+ */ +@NodeInfo(cycles = NodeCycles.CYCLES_IGNORED, size = NodeSize.SIZE_IGNORED, allowedUsageTypes = InputType.Association, nameTemplate = "Invoke#{p#type/s}") +public class ScopedMethodNode extends FixedWithNextNode implements ClusterNode { + public static final NodeClass TYPE = NodeClass.create(ScopedMethodNode.class); + + @Node.OptionalInput(InputType.Association) ScopedMethodNode start; + + public enum Type { + START, + END + } + + private final Type type; + + public ScopedMethodNode() { + super(TYPE, StampFactory.forVoid()); + this.type = Type.START; + } + + public ScopedMethodNode(ScopedMethodNode start) { + super(TYPE, StampFactory.forVoid()); + this.type = Type.END; + this.start = start; + } + + public ScopedMethodNode getStart() { + return start; + } + + public Type getType() { + return type; + } + + @Override + public void delete() { + if (type == Type.START) { + replaceAtUsages(null); + } + GraphUtil.unlinkFixedNode(this); + this.safeDelete(); + } + + @Override + public String toString(Verbosity verbosity) { + String s = super.toString(verbosity); + if (verbosity == Verbosity.Long) { + s += "#" + type; + } + return s; + } +} diff --git a/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/ForeignFunctionsFeature.java b/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/ForeignFunctionsFeature.java index 816140bd754b..4d797effd44a 100644 --- a/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/ForeignFunctionsFeature.java +++ b/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/ForeignFunctionsFeature.java @@ -27,6 +27,7 @@ import static java.lang.invoke.MethodHandles.exactInvoker; import static java.lang.invoke.MethodHandles.insertArguments; +import java.io.FileDescriptor; import java.lang.constant.DirectMethodHandleDesc; import java.lang.constant.DirectMethodHandleDesc.Kind; import java.lang.foreign.FunctionDescriptor; @@ -47,8 +48,10 @@ 
import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; import java.util.function.UnaryOperator; +import org.graalvm.collections.EconomicSet; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; @@ -59,6 +62,7 @@ import org.graalvm.nativeimage.impl.ConfigurationCondition; import org.graalvm.nativeimage.impl.RuntimeForeignAccessSupport; +import com.oracle.graal.pointsto.meta.AnalysisMetaAccess; import com.oracle.graal.pointsto.meta.AnalysisMethod; import com.oracle.graal.pointsto.meta.AnalysisUniverse; import com.oracle.svm.configure.ConfigurationFile; @@ -67,6 +71,7 @@ import com.oracle.svm.core.OS; import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.SubstrateUtil; +import com.oracle.svm.core.code.FactoryMethodHolder; import com.oracle.svm.core.configure.ConfigurationFiles; import com.oracle.svm.core.feature.AutomaticallyRegisteredFeature; import com.oracle.svm.core.feature.InternalFeature; @@ -76,6 +81,8 @@ import com.oracle.svm.core.foreign.LinkToNativeSupportImpl; import com.oracle.svm.core.foreign.NativeEntryPointInfo; import com.oracle.svm.core.foreign.RuntimeSystemLookup; +import com.oracle.svm.core.foreign.SubstrateMappedMemoryUtils; +import com.oracle.svm.core.foreign.Target_java_nio_MappedMemoryUtils; import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; import com.oracle.svm.core.meta.MethodPointer; import com.oracle.svm.core.util.BasedOnJDKFile; @@ -83,20 +90,32 @@ import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.ConditionalConfigurationRegistry; import com.oracle.svm.hosted.FeatureImpl; +import com.oracle.svm.hosted.FeatureImpl.BeforeAnalysisAccessImpl; import com.oracle.svm.hosted.ImageClassLoader; import com.oracle.svm.hosted.ProgressReporter; +import com.oracle.svm.hosted.SharedArenaSupport; import com.oracle.svm.hosted.code.CEntryPointData; 
import com.oracle.svm.hosted.config.ConfigurationParserUtils; +import com.oracle.svm.hosted.foreign.phases.SubstrateOptimizeSharedArenaAccessPhase; import com.oracle.svm.util.LogUtils; import com.oracle.svm.util.ModuleSupport; import com.oracle.svm.util.ReflectionUtil; import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.debug.GraalError; +import jdk.graal.compiler.phases.BasePhase; +import jdk.graal.compiler.phases.PhaseSuite; +import jdk.graal.compiler.phases.common.CanonicalizerPhase; +import jdk.graal.compiler.phases.common.IterativeConditionalEliminationPhase; +import jdk.graal.compiler.phases.tiers.MidTierContext; import jdk.graal.compiler.serviceprovider.JavaVersionUtil; +import jdk.internal.foreign.MemorySessionImpl; import jdk.internal.foreign.abi.AbstractLinker; import jdk.internal.foreign.abi.LinkerOptions; +import jdk.internal.misc.ScopedMemoryAccess.ScopedAccessError; import jdk.vm.ci.meta.MetaAccessProvider; import jdk.vm.ci.meta.ResolvedJavaMethod; +import jdk.vm.ci.meta.ResolvedJavaType; @AutomaticallyRegisteredFeature @Platforms(Platform.HOSTED_ONLY.class) @@ -104,6 +123,9 @@ public class ForeignFunctionsFeature implements InternalFeature { private static final Map REQUIRES_CONCEALED = Map.of( "jdk.internal.vm.ci", new String[]{"jdk.vm.ci.code", "jdk.vm.ci.meta", "jdk.vm.ci.amd64", "jdk.vm.ci.aarch64"}, "java.base", new String[]{ + "jdk.internal.access.foreign", + "jdk.internal.misc", + "jdk.internal.util", "jdk.internal.foreign", "jdk.internal.foreign.abi", "jdk.internal.foreign.abi.aarch64", @@ -126,6 +148,10 @@ public class ForeignFunctionsFeature implements InternalFeature { private final Set registeredDirectUpcalls = ConcurrentHashMap.newKeySet(); private int directUpcallCount = -1; + private final EconomicSet neverAccessesSharedArena = EconomicSet.create(); + + private final EconomicSet neverAccessesSharedArenaMethods = EconomicSet.create(); + @Fold public static ForeignFunctionsFeature singleton() { return 
ImageSingletons.lookup(ForeignFunctionsFeature.class); @@ -214,7 +240,28 @@ public void registerForDirectUpcall(ConfigurationCondition condition, MethodHand } } - ForeignFunctionsFeature() { + private final class SharedArenaSupportImpl implements SharedArenaSupport { + + @Override + public BasePhase createOptimizeSharedArenaAccessPhase() { + PhaseSuite sharedArenaPhases = new PhaseSuite<>(); + sharedArenaPhases.appendPhase(new SubstrateOptimizeSharedArenaAccessPhase(CanonicalizerPhase.create())); + /* + * After we injected all necessary scope wide session checks we need to cleanup any new, + * potentially repetitive, control flow logic. + */ + sharedArenaPhases.appendPhase(new IterativeConditionalEliminationPhase(CanonicalizerPhase.create(), false)); + sharedArenaPhases.appendPhase(CanonicalizerPhase.create()); + return sharedArenaPhases; + } + + @Override + public void registerSafeArenaAccessorClass(AnalysisMetaAccess metaAccess, Class klass) { + ForeignFunctionsFeature.this.registerSafeArenaAccessorClass(metaAccess, klass); + } + } + + protected ForeignFunctionsFeature() { /* * We intentionally add these exports in the constructor to avoid access errors from plugins * when the feature is disabled in the config. 
@@ -242,6 +289,7 @@ public void duringSetup(DuringSetupAccess a) { ImageSingletons.add(ForeignFunctionsRuntime.class, new ForeignFunctionsRuntime()); ImageSingletons.add(RuntimeForeignAccessSupport.class, accessSupport); ImageSingletons.add(LinkToNativeSupport.class, new LinkToNativeSupportImpl()); + ImageSingletons.add(SharedArenaSupport.class, new SharedArenaSupportImpl()); ImageClassLoader imageClassLoader = access.getImageClassLoader(); ConfigurationParserUtils.parseAndRegisterConfigurations(getConfigurationParser(imageClassLoader), imageClassLoader, "panama foreign", @@ -524,6 +572,70 @@ public void beforeAnalysis(BeforeAnalysisAccess a) { directUpcallCount = 0; } ProgressReporter.singleton().setForeignFunctionsInfo(getCreatedDowncallStubsCount(), getCreatedUpcallStubsCount()); + + /* + * Even if there is no instance of MemorySessionImpl, we will kill the field location of + * 'MemorySessionImpl.state' which may trigger registration of the declaring type after the + * analysis universe was sealed. So, we eagerly register the field as accessed. + */ + access.registerAsRead(ReflectionUtil.lookupField(MemorySessionImpl.class, "state"), "field location is killed after safepoint"); + try { + initSafeArenaAccessors(access); + } catch (Throwable t) { + throw GraalError.shouldNotReachHere(t); + } + } + + /** + * Remember a set of known methods that frequently appear in scoped memory access methods as + * callees. Not all of those callees have to be inlined because some of them are SVM specific + * and are known to never access a (potentially already closed) memory session. Thus, such + * callees can be excluded during verification. 
+ */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+14/src/java.base/share/classes/java/nio/MappedMemoryUtils.java") + protected void initSafeArenaAccessors(BeforeAnalysisAccessImpl access) throws NoSuchMethodException { + MetaAccessProvider metaAccess = access.getMetaAccess(); + + registerSafeArenaAccessorClass(metaAccess, FactoryMethodHolder.class); + registerSafeArenaAccessorClass(metaAccess, LogUtils.class); + + /* + * Some methods that are normally part of the exception handler code for the calls to + * checkValidStateRaw + */ + registerSafeArenaAccessorMethod(metaAccess, Supplier.class.getMethod("get")); + registerSafeArenaAccessorMethod(metaAccess, VMError.class.getMethod("shouldNotReachHereSubstitution")); + registerSafeArenaAccessorMethod(metaAccess, ScopedAccessError.class.getMethod("newRuntimeException")); + registerSafeArenaAccessorMethod(metaAccess, Throwable.class.getMethod("getMessage")); + registerSafeArenaAccessorMethod(metaAccess, ReflectionUtil.lookupMethod(Throwable.class, "fillInStackTrace", int.class)); + + /* + * Calls to the following methods may remain in the @Scoped-annotated methods because they + * don't actually access the native memory in a way that it could lead to a crash. They do + * syscalls which can handle unmapped memory gracefully. However, any changes in class + * 'MappedMemoryUtils' must be carefully considered! 
+ */ + Class mappedMemoryUtils = Target_java_nio_MappedMemoryUtils.class; + registerSafeArenaAccessorMethod(metaAccess, ReflectionUtil.lookupMethod(mappedMemoryUtils, "force", FileDescriptor.class, long.class, boolean.class, long.class, long.class)); + registerSafeArenaAccessorMethod(metaAccess, ReflectionUtil.lookupMethod(mappedMemoryUtils, "isLoaded", long.class, boolean.class, long.class)); + registerSafeArenaAccessorMethod(metaAccess, ReflectionUtil.lookupMethod(mappedMemoryUtils, "unload", long.class, boolean.class, long.class)); + registerSafeArenaAccessorMethod(metaAccess, ReflectionUtil.lookupMethod(SubstrateMappedMemoryUtils.class, "load", long.class, boolean.class, long.class)); + } + + protected void registerSafeArenaAccessorClass(MetaAccessProvider metaAccess, Class klass) { + neverAccessesSharedArena.add(metaAccess.lookupJavaType(klass)); + } + + protected void registerSafeArenaAccessorMethod(MetaAccessProvider metaAccess, Executable method) { + neverAccessesSharedArenaMethods.add(metaAccess.lookupJavaMethod(method)); + } + + public EconomicSet getNeverAccessesSharedArena() { + return neverAccessesSharedArena; + } + + public EconomicSet getNeverAccessesSharedArenaMethods() { + return neverAccessesSharedArenaMethods; } @Override diff --git a/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/phases/SubstrateOptimizeSharedArenaAccessPhase.java b/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/phases/SubstrateOptimizeSharedArenaAccessPhase.java new file mode 100644 index 000000000000..32685939c6f1 --- /dev/null +++ b/substratevm/src/com.oracle.svm.hosted.foreign/src/com/oracle/svm/hosted/foreign/phases/SubstrateOptimizeSharedArenaAccessPhase.java @@ -0,0 +1,1227 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.hosted.foreign.phases; + +import static jdk.graal.compiler.debug.DebugContext.VERY_DETAILED_LEVEL; + +import java.lang.foreign.Arena; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; + +import org.graalvm.collections.EconomicMap; +import org.graalvm.collections.EconomicSet; +import org.graalvm.collections.Equivalence; +import org.graalvm.word.LocationIdentity; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.nodes.ClusterNode; +import com.oracle.svm.core.nodes.foreign.MemoryArenaValidInScopeNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ClusterBeginNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ExceptionInputNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.ExceptionPathNode; +import com.oracle.svm.core.nodes.foreign.ScopedMemExceptionHandlerClusterNode.RegularPathNode; +import com.oracle.svm.core.nodes.foreign.ScopedMethodNode; +import com.oracle.svm.hosted.foreign.ForeignFunctionsFeature; +import com.oracle.svm.hosted.meta.HostedMethod; +import com.oracle.svm.hosted.meta.HostedType; + +import jdk.graal.compiler.core.common.cfg.CFGLoop; +import jdk.graal.compiler.debug.Assertions; +import jdk.graal.compiler.debug.DebugContext; +import jdk.graal.compiler.debug.GraalError; +import jdk.graal.compiler.debug.TTY; +import jdk.graal.compiler.graph.Node; +import jdk.graal.compiler.graph.NodeBitMap; +import jdk.graal.compiler.graph.NodeStack; +import jdk.graal.compiler.graph.Position; +import jdk.graal.compiler.nodeinfo.InputType; +import jdk.graal.compiler.nodes.EndNode; +import jdk.graal.compiler.nodes.FixedNode; +import jdk.graal.compiler.nodes.FixedWithNextNode; +import jdk.graal.compiler.nodes.FrameState; +import jdk.graal.compiler.nodes.GraphState; +import 
jdk.graal.compiler.nodes.GraphState.StageFlag; +import jdk.graal.compiler.nodes.GuardPhiNode; +import jdk.graal.compiler.nodes.IfNode; +import jdk.graal.compiler.nodes.Invoke; +import jdk.graal.compiler.nodes.LoopBeginNode; +import jdk.graal.compiler.nodes.MergeNode; +import jdk.graal.compiler.nodes.NodeView; +import jdk.graal.compiler.nodes.PhiNode; +import jdk.graal.compiler.nodes.SafepointNode; +import jdk.graal.compiler.nodes.StructuredGraph; +import jdk.graal.compiler.nodes.ValueNode; +import jdk.graal.compiler.nodes.ValuePhiNode; +import jdk.graal.compiler.nodes.cfg.ControlFlowGraph; +import jdk.graal.compiler.nodes.cfg.ControlFlowGraph.RecursiveVisitor; +import jdk.graal.compiler.nodes.cfg.HIRBlock; +import jdk.graal.compiler.nodes.memory.MemoryAccess; +import jdk.graal.compiler.nodes.memory.MemoryKill; +import jdk.graal.compiler.nodes.memory.MemoryPhiNode; +import jdk.graal.compiler.nodes.memory.SingleMemoryKill; +import jdk.graal.compiler.nodes.util.GraphUtil; +import jdk.graal.compiler.phases.BasePhase; +import jdk.graal.compiler.phases.RecursivePhase; +import jdk.graal.compiler.phases.common.CanonicalizerPhase; +import jdk.graal.compiler.phases.graph.ReentrantBlockIterator; +import jdk.graal.compiler.phases.graph.ReentrantBlockIterator.BlockIteratorClosure; +import jdk.graal.compiler.phases.schedule.SchedulePhase; +import jdk.graal.compiler.phases.tiers.MidTierContext; +import jdk.internal.foreign.MemorySessionImpl; +import jdk.vm.ci.meta.ResolvedJavaMethod; +import jdk.vm.ci.meta.ResolvedJavaType; + +/** + * Implements support for shared arenas on SVM. An {@link Arena} can be shared across multiple + * threads by {@link Arena#ofShared()} in which case closing of the arena needs to be recognized by + * other threads concurrently accessing such an arena. + * + * For details on the exception path see the Javadoc in scoped memory access substitutions in {@code + * Target_jdk_internal_misc_ScopedMemoryAccess}. 
Please read that first before reading the doc of + * this phase as it implements the abstract idea expressed in the substitution code. + * + * The general pattern for checking session validity can be seen in + * {@link com.oracle.svm.core.foreign.SubstrateForeignUtil#sessionExceptionHandler(MemorySessionImpl, Object, long)}. + * + * The rest of this doc talks about finding and inserting control flow modeling the validity checks + * for a memory session. The general invariant that must always hold is that there must be no + * safepoint between the check of a memory access and the memory access itself. The idea is roughly + * explained by example in the following pieces of code: + * + *
+ * scopeCheck {
+ *     safepoint();
+ *     // needs a check because safepoint happened before, could have closed arena
+ *     useSession();
+ * }
+ * 
+ * + *
+ * scopeCheck {
+ *     // no check needed because no safepoint happened since the scope check
+ *     useSession();
+ *     safepoint();
+ * }
+ * 
+ * + *
+ * scopeCheck {
+ *     while (header) {
+ *         // Needs a check because a safepoint may have happened along the loop backedge.
+ *         useSession();
+ *         safepoint();
+ *     }
+ * }
+ * 
+ * + *
+ * scopeCheck {
+ *     while (header) {
+ *         // needs a check not because it's dominated by a safepoint but because, inside a loop,
+ *         // if a single safepoint is possible the entire loop needs checking
+ *         useSession();
+ *         useSession(); // does not need a check, because we already checked before a dominating
+ *                       // node and no safepoint in between yet.
+ *         safepoint();
+ *     }
+ * }
+ * 
+ * + * We analyze the code and ensure that every access that uses a {@link MemorySessionImpl} or a value + * associated with a session is checked for potential concurrent close. Concurrent closing on SVM + * can only happen at a safepoint. Thus, we analyze the graph for a safepoint. By dominance we try + * to use a minimal set of checks, in a sense that every path dominated by a safepoint (or in a loop + * where there is a safepoint on any backedge) checks a minimal number of times. + * + * We do this by analyzing the dominator tree of a program. Every time we see the entry of a scoped + * method (checked by special nodes {@link MemoryArenaValidInScopeNode} inserted by the parser) we + * start analyzing memory access patterns for shared arena usage. We mark all of those accesses + * together with the respective session. Later we minimize the checks by scanning over the + * {@link ControlFlowGraph} in reverse post order only inserting the minimal amount of session + * checks. + * + * As a running example consider the following piece of code + * + *
+ * void foo(MemorySession m1) {
+ *     scopedAccess(m1);
+ *     {
+ *         if (sth) {
+ *             memAccess(m1);
+ *             memAccess(m1);
+ *             memAccess(m1);
+ *         }
+ *         memAccess(m1);
+ *         safepoint();
+ *         memAccess(m1);
+ *         memAccess(m1);
+ *     }
+ *     scopedAccess(m1);
+ *     {
+ *         for (int i = 0; i < firstLimit; i++) {
+ *             code();
+ *             memAccess(m1);
+ *             for (int j = 0; j < secondLimit; j++) {
+ *                 moreCode();
+ *                 memAccess(m1);
+ *                 safepoint();
+ *                 memAccess(m1);
+ *             }
+ *             memAccess(m1);
+ *             evenMoreCode();
+ *             memAccess(m1);
+ *         }
+ *     }
+ * }
+ * 
+ * + * We process the dominator tree of the program and record all accesses. We record per node (see + * {@code processNode in this file}) a set of {@link ScopedAccess} - which can be a safepoint or a + * regular memory operation. If we are inside a loop we also have to record a safepoint entry per + * session for every loop header between the scope start and this safepoint: this is necessary as + * any path in a nested loop can do a safepoint. + * + * See the following pseudo code with annotations as a rough depiction of what is going on + * + * <pre>
+ * void foo(MemorySession m1) {
+ *     scopedAccess(m1); // start check m1
+ *     {
+ *         if (sth) {
+ *             memAccess(m1); // m1 access
+ *             memAccess(m1); // m1 access
+ *             memAccess(m1); // m1 access
+ *         }
+ *         memAccess(m1); // m1 access
+ *         safepoint();  // safepoint potentially closing m1
+ *         memAccess(m1); // m1 access
+ *         memAccess(m1); // m1 access
+ *     } // stop check m1
+ *     scopedAccess(m1); // start check m1
+ *     {
+ *         for (int i = 0; i < firstLimit; i++) { // header of loop with safepoint potentially
+ *                                                // closing m1
+ *             code();
+ *             memAccess(m1); // m1 access
+ *             for (int j = 0; j < secondLimit; j++) { // header of loop with safepoint potentially
+ *                                                     // closing m1
+ *                 moreCode();
+ *                 memAccess(m1); // m1 access
+ *                 safepoint();  // safepoint potentially closing m1
+ *                 memAccess(m1); // m1 access
+ *             }
+ *             memAccess(m1); // m1 access
+ *             evenMoreCode();
+ *             memAccess(m1); // m1 access
+ *         }
+ *     }
+ * }
+ * 
+ * + * In order to transport this information during optimization we create a so-called "sugared" graph. + * That is a mapping of the nodes of the structured graph to scope using accesses. For this we + * record a mapping of each node to a list of scoped accesses + * {@code EconomicMap>}. A {@code ScopedAccess} can be a safepoint or a + * memory access. Every time we process a memory access or a safepoint we record an entry. If we are + * processing a safepoint node every value associated with an open scope is enqueued for checking. + * + * The following pseudo code should outline the "sugared" graph concept before optimization + * + * <pre>
+ * void foo(MemorySession m1) {
+ *     scopedAccess(m1); // <- memoryAccessEntry(m1)
+ *     {
+ *         if (sth) {
+ *             memAccess(m1); // <- memoryAccessEntry(m1)
+ *             memAccess(m1);// <- memoryAccessEntry(m1)
+ *             memAccess(m1);// <- memoryAccessEntry(m1)
+ *         }
+ *         memAccess(m1); // <- memoryAccessEntry(m1)
+ *         safepoint(); // <- safepointEntry(m1)
+ *         memAccess(m1); // <- memoryAccessEntry(m1)
+ *         memAccess(m1);// <- memoryAccessEntry(m1)
+ *     }
+ *     scopedAccess(m1);
+ *     {
+ *         for (int i = 0; i < firstLimit; i++) { // <- safepointEntry(m1)
+ *             code();
+ *             memAccess(m1); // <- memoryAccessEntry(m1)
+ *             for (int j = 0; j < secondLimit; j++) {// <- safepointEntry(m1)
+ *                 moreCode();
+ *                 memAccess(m1);// <- memoryAccessEntry(m1)
+ *                 safepoint();// // <- safepointEntry(m1)
+ *                 memAccess(m1);// <- memoryAccessEntry(m1)
+ *             }
+ *             memAccess(m1);// <- memoryAccessEntry(m1)
+ *             evenMoreCode();
+ *             memAccess(m1);// <- memoryAccessEntry(m1)
+ *         }
+ *     }
+ * }
+ * 
+ * + * + * With this "sugared" graph we perform a reverse post order iteration of the control flow graph. + * This is implemented in {@link MinimalSessionChecks}: we use a scan line based approach going over + * every node we encounter. We encode this "scan line" in a {@link NodeBitMap}. Every time we find a + * safepoint entry in the sugared graph we mark the associated node to be checked. The associated + * node represents the corresponding memory session. If it is marked with 1 we need to check its + * validity before we can access it the next time. This however is done lazily: only if an access is + * encountered do we actually check the validity. Marking a session for validity checking means setting + * the value in the node bit map to 1. This means it has to be checked. If we encounter an access + * operation we duplicate the checking control flow and clear the bit. Only a safepoint entry will + * mark the bit again so repetitive access will NOT be checked. Also this approach should guarantee + * a minimal lazy throwing semantic: this means we only throw on a closed session if we are + * actually accessing it after close. If there is a close while we are inside a loop that accessed it but + * we are after the safepoint and the loop is about to finish, we do not check it again. + * + * For the example above using the scanline approach would create the following final code: + * + * <pre>
+ * void foo(MemorySession m1) {
+ *     if (sth) { // no checks needed here
+ *         memAccess(m1);
+ *         memAccess(m1);
+ *         memAccess(m1);
+ *     }
+ *     memAccess(m1);
+ *     safepoint(); // first check after safepoint
+ *     checkSessionValid(m1);
+ *     memAccess(m1);
+ *     memAccess(m1);
+ *
+ *     for (int i = 0; i < firstLimit; i++) {
+ *         code();
+ *         // check because a safepoint on a backedge could have caused invalidation; we do not know
+ *         // which backedge (that is impossible to know), so we have to check
+ *         checkSessionValid(m1);
+ *         memAccess(m1);
+ *         for (int j = 0; j < secondLimit; j++) {
+ *             moreCode(); // same, in inner loop safepoint could have happened, need to check
+ *             checkSessionValid(m1);
+ *             memAccess(m1);
+ *             safepoint(); // check after safepoint
+ *             checkSessionValid(m1);
+ *             memAccess(m1);
+ *         }
+ *         // check because the inner loop contains a safepoint; it can be that we safepoint but don't
+ *         // check because no access happened
+ *         checkSessionValid(m1);
+ *         memAccess(m1);
+ *         evenMoreCode();
+ *         memAccess(m1);
+ *     }
+ * }
+ * 
+ * + * For loop backedges we have to use a conservative approach - if any backedge may safepoint we need + * to check on every enclosing loop because the inner loop could have been executed any time we go + * through a backedge (and visit the loop header). + */ +public class SubstrateOptimizeSharedArenaAccessPhase extends BasePhase implements RecursivePhase { + + final CanonicalizerPhase canonicalizer; + + public SubstrateOptimizeSharedArenaAccessPhase(CanonicalizerPhase canonicalizer) { + this.canonicalizer = canonicalizer; + } + + @Override + public Optional notApplicableTo(GraphState graphState) { + return NotApplicable.ifAny(NotApplicable.unlessRunAfter(this, StageFlag.SAFEPOINTS_INSERTION, graphState)); + } + + @Override + public boolean shouldApply(StructuredGraph graph) { + return graph.getNodes().filter(MemoryArenaValidInScopeNode.class).count() > 0 || graph.getNodes().filter(ScopedMethodNode.class).count() > 0; + } + + /** + * Log building, processing and duplication of shared arena exception handlers to stdout: + * development only, creates a lot of output. + */ + public static final boolean LOG_SHARED_ARENAS = false; + + /** + * Unconditionally run a schedule before this phase and after every larger graph rewrite. Very + * runtime intensive - only use during development. + */ + public static final boolean RUN_SHARED_ARENA_SCHEDULE_VERIFICATION = false; + + /** + * Only apply the exception handler duplication for methods matching the debug dump filter. + * Helps to narrow down problems in large image builds, only use during development. + */ + public static final boolean ONLY_DUPLICATE_DUMP_METHODS = false; + + /** + * Also dump large sets of nodes as IGV graphs during exception handler duplication - only use + * during development. + */ + public static final boolean DUMP_LARGE_NODE_SETS = false; + + /** + * Verify that there are no invokes left in the scoped methods after application of this phase. 
+ */ + public static final boolean VERIFY_NO_DOMINATED_CALLS = true; + + static class SessionStateRawGraphCluster { + MemoryArenaValidInScopeNode scopeStart; + ClusterBeginNode clusterBegin; + FixedNode firstClusterNode; + ExceptionPathNode clusterExceptionEnd; + RegularPathNode clusterNonExceptionEnd; + ExceptionInputNode session; + + private boolean validForBuilding() { + assert scopeStart != null : Assertions.errorMessage("Must not be null"); + assert clusterBegin != null : Assertions.errorMessage("Must not be null", scopeStart); + assert clusterExceptionEnd != null : Assertions.errorMessage("Must not be null", scopeStart); + assert clusterNonExceptionEnd != null : Assertions.errorMessage("Must not be null", scopeStart); + return true; + } + + /** + * If we are not seeing the exceptional part of the exception handler method call it + * normally means either the session was unconditionally null and we can never throw or we + * never see a arena.close() operation during static analysis in which case we also can + * never throw. 
+ */ + private boolean isNullSessionOrNeverClosedArena() { + return scopeStart != null && clusterBegin != null && clusterNonExceptionEnd != null && clusterExceptionEnd == null; + } + + static SessionStateRawGraphCluster build(MemoryArenaValidInScopeNode arenaNode) { + if (LOG_SHARED_ARENAS) { + TTY.printf("Building cluster for %s in %s%n", arenaNode, arenaNode.graph()); + } + SessionStateRawGraphCluster cluster = new SessionStateRawGraphCluster(); + for (Node usage : arenaNode.usages()) { + if (usage instanceof ClusterBeginNode clusterBegin) { + assert cluster.clusterBegin == null; + cluster.clusterBegin = clusterBegin; + } else if (usage instanceof RegularPathNode clusterNonExceptionEnd) { + assert cluster.clusterNonExceptionEnd == null; + cluster.clusterNonExceptionEnd = clusterNonExceptionEnd; + } else if (usage instanceof ExceptionPathNode clusterExceptionEnd) { + assert cluster.clusterExceptionEnd == null; + cluster.clusterExceptionEnd = clusterExceptionEnd; + } else if (usage instanceof ExceptionInputNode clusterInput) { + assert cluster.session == null; + cluster.session = clusterInput; + } + } + cluster.scopeStart = arenaNode; + if (cluster.isNullSessionOrNeverClosedArena()) { + return null; + } + assert cluster.validForBuilding(); + cluster.collectNodes(); + return cluster; + } + + NodeBitMap clusterNodes; + + private void collectNodes() { + assert clusterNodes == null; + final StructuredGraph graph = scopeStart.graph(); + clusterNodes = graph.createNodeBitMap(); + + clusterNodes.mark(clusterBegin); + + NodeStack toProcess = new NodeStack(); + toProcess.push(clusterExceptionEnd); + toProcess.push(clusterNonExceptionEnd); + + // make sure any nodes in between are also properly marked + firstClusterNode = clusterBegin; + FixedNode f = clusterBegin; + while (!(f.predecessor() instanceof IfNode)) { + clusterNodes.mark(f); + firstClusterNode = f; + ensureUniqueStates(f); + f = (FixedNode) f.predecessor(); + } + + // first collect fixed nodes + while 
(!toProcess.isEmpty()) { + Node cur = toProcess.pop(); + if (clusterNodes.isMarked(cur)) { + continue; + } + clusterNodes.mark(cur); + for (Node pred : cur.cfgPredecessors()) { + toProcess.push(pred); + } + ensureUniqueStates(cur); + } + + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After duplicating states for collection of cluster"); + + toProcess.push(clusterExceptionEnd); + toProcess.push(clusterNonExceptionEnd); + + clusterNodes.mark(session); + clusterNodes.mark(scopeStart); + + if (DUMP_LARGE_NODE_SETS) { + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "Cluster data clusterBegin=%s, firstClusterNode=%s, clusterExceptionEnd=%s, clusterNonExceptionEnd=%s,session=%s", + clusterBegin, firstClusterNode, clusterExceptionEnd, clusterNonExceptionEnd, session); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "Fixed cluster start node %s and regular nodes are %s", firstClusterNode, clusterNodes); + } + + boolean change = true; + int iterations = 0; + while (change) { + change = false; + int before = clusterNodes.count(); + // process a second time and all inputs + while (!toProcess.isEmpty()) { + Node cur = toProcess.pop(); + + // only the session node marks a boundary to which we should stop processing + // inputs + if (cur == firstClusterNode || cur == scopeStart) { + continue; + } + + for (Node pred : cur.cfgPredecessors()) { + toProcess.push(pred); + } + for (Node input : cur.inputs()) { + visitUntilCluster(clusterNodes, input); + } + // phis need duplication + if (cur instanceof MergeNode m) { + for (PhiNode phi : m.phis()) { + clusterNodes.checkAndMarkInc(phi); + for (Node input : phi.inputs()) { + visitUntilCluster(clusterNodes, input); + } + } + } + + // some nodes need duplication because of the edge type not being able to handle + // phis + for (Position p : cur.inputPositions()) { + Node input = p.get(cur); + if (input == null) { + continue; + } + if (p.getInputType() == InputType.State || 
p.getInputType() == InputType.Extension) { + // we cannot create phis for extensions or framestates, thus force + // duplication of them + clusterNodes.mark(input); + for (Node assocInput : input.inputs()) { + visitUntilCluster(clusterNodes, assocInput); + } + } + } + } + change = clusterNodes.count() > before; + if (iterations++ > MAX_CLUSTER_ITERATIONS) { + throw GraalError.shouldNotReachHere("Cluster nodes not stabilizing"); + } + } + // no need to duplicate the scope + clusterNodes.clear(scopeStart); + } + + private static void ensureUniqueStates(Node cur) { + // make sure states are unique info points for example also reference states and + // themselves are not deopt nodes, thus force it + for (Position p : cur.inputPositions()) { + Node input = p.get(cur); + if (input == null) { + continue; + } + if (p.getInputType() == InputType.State) { + // we cannot phi state edges + FrameState fs = (FrameState) input; + p.set(cur, fs.duplicateWithVirtualState()); + } + } + } + + } + + private static final int MAX_CLUSTER_ITERATIONS = 8; + + private static void visitUntilCluster(NodeBitMap inCluster, Node start) { + NodeBitMap visited = start.graph().createNodeBitMap(); + visitUntilCluster(inCluster, start, visited, ""); + } + + @SuppressWarnings("unused") + private static void visitUntilCluster(NodeBitMap inCluster, Node start, NodeBitMap visited, String p) { + if (LOG_SHARED_ARENAS) { + TTY.printf("%sVisiting %s%n", p, start); + } + if (visited.isMarkedAndGrow(start)) { + return; + } + if (inCluster.isMarkedAndGrow(start)) { + return; + } + if (start instanceof PhiNode phi) { + /* + * A phi is only part of the cluster if its merge is part of the cluster, else we stop + * here and its explicitly never part of the cluster. 
+ */ + if (!inCluster.contains(phi.merge())) { + // the phi must never be part of the cluster + assert !inCluster.isMarked(phi) : "Must not mark phi if merge is not part of the cluster " + phi; + visited.mark(phi); + return; + } + } + visited.mark(start); + boolean inputsInCluster = false; + String inputIn = LOG_SHARED_ARENAS ? "" : null; + for (Node input : start.inputs()) { + visitUntilCluster(inCluster, input, visited, LOG_SHARED_ARENAS ? p + "\t" : null); + boolean inputInCluster = inCluster.isMarkedAndGrow(input); + if (LOG_SHARED_ARENAS && inputInCluster) { + inputIn += input.toString() + ","; + } + inputsInCluster = inputInCluster || inputsInCluster; + } + if (inputsInCluster) { + if (LOG_SHARED_ARENAS) { + TTY.printf("%sMarking %s, inputs in are[%s]%n", p, start, inputIn); + } + inCluster.markAndGrow(start); + } + } + + private static void scheduleVerify(StructuredGraph graph) { + if (RUN_SHARED_ARENA_SCHEDULE_VERIFICATION) { + SchedulePhase.runWithoutContextOptimizations(graph); + } + } + + @Override + protected void run(StructuredGraph graph, MidTierContext context) { + scheduleVerify(graph); + if (ONLY_DUPLICATE_DUMP_METHODS) { + if (!graph.getDebug().isDumpEnabledForMethod()) { + cleanupClusterNodes(graph, context, null); + return; + } + } + cleanupClusterNodes(graph, context, insertSessionChecks(graph, context)); + } + + private static EconomicSet insertSessionChecks(StructuredGraph graph, MidTierContext context) { + ControlFlowGraph cfg = ControlFlowGraph.newBuilder(graph).backendBlocks(true).connectBlocks(true).computeFrequency(true).computeLoops(true).computeDominators(true) + .computePostdominators(true) + .build(); + // Compute the graph with all the necessary data about scoped memory accesses. 
+ EconomicSet calls = EconomicSet.create(); + EconomicMap> sugaredGraph = enumerateScopedAccesses(cfg, context, calls); + if (sugaredGraph == null) { + return null; + } + ReentrantBlockIterator.apply(new MinimalSessionChecks(graph, sugaredGraph, cfg, calls), cfg.getStartBlock()); + return calls; + } + + /* + * Now iterate the CFG in reverse post order. Based on the previous analysis of the control flow + * graph we know where safepoints happened and we know where we are accessing scoped memory. Now + * if there is a safepoint happening we must check the associated session accesses. If we + * checked one we can stop checking until the next safepoint. + */ + static class MinimalSessionChecks extends BlockIteratorClosure { + private final StructuredGraph graph; + private final EconomicMap> sugaredGraph; + private final ControlFlowGraph cfg; + private final EconomicSet calls; + + MinimalSessionChecks(StructuredGraph graph, EconomicMap> sugaredGraph, ControlFlowGraph cfg, EconomicSet calls) { + this.graph = graph; + this.sugaredGraph = sugaredGraph; + this.cfg = cfg; + this.calls = calls; + } + + @Override + protected NodeBitMap processBlock(HIRBlock block, NodeBitMap currentState) { + NodeBitMap effectiveCurrentState = currentState; + for (FixedNode f : block.getNodes()) { + List accesses = sugaredGraph.get(f); + if (accesses == null) { + continue; + } + + // check for safepoints first + for (ScopedAccess access : accesses) { + if (access instanceof ScopedSafepoint se) { + /* + * Note: we mark the arena here and not the session. Some nodes ref the + * session other the arena or offsets, we group all under the arena node and + * use that one as a "must check" pattern. 
+ */ + if (effectiveCurrentState == null) { + effectiveCurrentState = graph.createNodeBitMap(); + } + effectiveCurrentState.mark(se.arenaNode); + } + } + + // check for safepoints first + for (ScopedAccess access : accesses) { + if (access instanceof ScopedSafepoint) { + continue; + } + ScopedMemoryAccess sma = (ScopedMemoryAccess) access; + if (effectiveCurrentState == null) { + effectiveCurrentState = graph.createNodeBitMap(); + } else if (effectiveCurrentState.isMarked(sma.scope.defNode)) { + // check and then clear + effectiveCurrentState.clear(sma.scope.defNode); + duplicateCheckValidStateRaw(graph, sma.scope, (FixedNode) f.predecessor(), calls); + GraalError.guarantee(cfg.blockFor(sma.scope.defNode).dominates(cfg.blockFor(f)), "%s must dominate %s", sma.scope.defNode, f); + scheduleVerify(graph); + } + } + + } + return effectiveCurrentState; + } + + @Override + protected NodeBitMap merge(HIRBlock merge, List states) { + /* + * If any of the predecessors indicated a safepoint for the given node we must assume + * path could have yielded a safepoint, thus issue a safepoint state. + */ + NodeBitMap resultMap = null; + for (NodeBitMap other : states) { + if (other == null) { + continue; + } + if (resultMap == null) { + // first time we actually found a non-null entry + resultMap = other.copy(); + continue; + } + resultMap.markAll(other); + } + return resultMap; + } + + @Override + protected NodeBitMap getInitialState() { + // avoid eagerly returning a map that is empty most of the time + return null; + } + + @Override + protected NodeBitMap cloneState(NodeBitMap oldState) { + return oldState == null ? null : oldState.copy(); + } + } + + /** + * Cleanup all remaining {@link ClusterNode} in the graph. After session check expansion they + * are not needed any more. 
+ */ + private void cleanupClusterNodes(StructuredGraph graph, MidTierContext context, EconomicSet calls) { + if (VERIFY_NO_DOMINATED_CALLS) { + if (calls != null) { + for (DominatedCall call : calls) { + if (call.invoke.isAlive()) { + throw GraalError.shouldNotReachHere("After inserting all session checks call " + call.invoke + " was not inlined and could access a session"); + } + } + } + } + + for (MemoryArenaValidInScopeNode scopeNode : graph.getNodes().filter(MemoryArenaValidInScopeNode.class).snapshot()) { + scopeNode.delete(0); + } + // also cleanup the original cluster nodes + for (Node n : graph.getNodes()) { + if (n instanceof ClusterNode clusterNode) { + clusterNode.delete(); + } + } + canonicalizer.apply(graph, context); + scheduleVerify(graph); + } + + /** + * Definition of an access to a value representing a memory session. + */ + static class ScopedAccess { + protected final ValueNode session; + + ScopedAccess(ValueNode session) { + this.session = session; + } + } + + /** + * A safepoint call in a region that is dominated by a scoped memory access call. All such + * safepoints can close shared arenas. + */ + static class ScopedSafepoint extends ScopedAccess { + final MemoryArenaValidInScopeNode arenaNode; + + ScopedSafepoint(ValueNode session, MemoryArenaValidInScopeNode arenaNode) { + super(session); + this.arenaNode = arenaNode; + } + + } + + /** + * A scoped memory access in a region that is dominated by a scoped memory access call. All such + * accesses have to check their session(s) if a safepoint potentially happened in between. + */ + static class ScopedMemoryAccess extends ScopedAccess { + + final ReachingDefScope scope; + + ScopedMemoryAccess(ValueNode session, ReachingDefScope scope) { + super(session); + this.scope = scope; + } + } + + /** + * The "scope" opened by a scoped memory access in which the associated memory session needs to + * remain open else an exception has to be thrown. 
+ */ + private static class ReachingDefScope { + private final MemoryArenaValidInScopeNode defNode; + + ReachingDefScope(HIRBlock defBlock, MemoryArenaValidInScopeNode defNode) { + this.defNode = defNode; + } + + private SessionStateRawGraphCluster defCluster; + + private boolean isPartOfCluster(Node n) { + if (defCluster == null) { + defCluster = SessionStateRawGraphCluster.build(defNode); + } + if (defCluster == null) { + // Note that it's possible this cluster may not be constructible. In such cases, + // aborting is necessary. This scenario occurs when we've already determined that + // session access checks are unnecessary due to certain conditions, such as the + // absence of concurrent threads or the lack of arena.close() invocations. + return false; + } + return defCluster.clusterNodes.contains(n); + } + } + + record DominatedCall(MemoryArenaValidInScopeNode defNode, Invoke invoke) { + + } + + private static EconomicMap> enumerateScopedAccesses(ControlFlowGraph cfg, MidTierContext context, EconomicSet dominatedCalls) { + EconomicMap> nodeAccesses = EconomicMap.create(); + final ResolvedJavaType memorySessionType = context.getMetaAccess().lookupJavaType(MemorySessionImpl.class); + assert memorySessionType != null; + + ControlFlowGraph.RecursiveVisitor visitor = new RecursiveVisitor<>() { + final Deque defs = new ArrayDeque<>(); + final Deque scopes = new ArrayDeque<>(); + final Deque actions = new ArrayDeque<>(); + + @Override + public Integer enter(HIRBlock b) { + int newDominatingValues = 0; + int newScopesToPop = 0; + Deque scopesToRepush = new ArrayDeque<>(); + + for (FixedNode f : b.getNodes()) { + if (f instanceof MemoryArenaValidInScopeNode mas) { + defs.push(new ReachingDefScope(b, mas)); + newDominatingValues++; + } else if (f instanceof ScopedMethodNode scope) { + if (scope.getType() == ScopedMethodNode.Type.START) { + scopes.push(scope); + newScopesToPop++; + } else if (scope.getType() == ScopedMethodNode.Type.END) { + ScopedMethodNode start = 
scopes.pop(); + scopesToRepush.push(start); + assert scope.getStart() == start : Assertions.errorMessage("Must match", start, scope, scope.getStart()); + } else { + throw GraalError.shouldNotReachHere("Unknown type " + scope.getType()); + } + } else { + processNode(f); + } + } + + final int finalNewDominatingValues = newDominatingValues; + final int finalNewScopesToPop = newScopesToPop; + + actions.push(new Runnable() { + + @Override + public void run() { + // remove all the dominated values + for (int i = 0; i < finalNewDominatingValues; i++) { + defs.pop(); + } + for (int i = 0; i < finalNewScopesToPop; i++) { + scopes.pop(); + } + for (int i = 0; i < scopesToRepush.size(); i++) { + scopes.push(scopesToRepush.pop()); + } + } + }); + + return 1; + } + + private void processNode(FixedNode f) { + if (!scopes.isEmpty() && f instanceof Invoke i) { + if (i.getTargetMethod() != null && calleeMightUseArena(i.getTargetMethod())) { + if (!defs.isEmpty()) { + dominatedCalls.add(new DominatedCall(defs.peek().defNode, i)); + } + } + } + if (f instanceof ClusterNode) { + return; + } + for (ReachingDefScope existingDef : defs) { + if (existingDef.isPartOfCluster(f)) { + return; + } + } + if (f instanceof SafepointNode safepoint) { + for (ReachingDefScope existingDef : defs) { + for (Node scopeAssociatedVal : existingDef.defNode.inputs()) { + EconomicSet> allLoopsToCheck = visitEveryLoopHeaderInBetween(existingDef, safepoint); + if (allLoopsToCheck != null) { + for (CFGLoop loop : allLoopsToCheck) { + // Mark the loop header as well as safepointing + LoopBeginNode lb = (LoopBeginNode) loop.getHeader().getBeginNode(); + cacheSafepointPosition(existingDef, (ValueNode) scopeAssociatedVal, lb); + + } + } + + // also check when we are actually doing the safepoint + cacheSafepointPosition(existingDef, (ValueNode) scopeAssociatedVal, f); + + } + } + } else if (f instanceof MemoryAccess) { + // only care about memory access nodes + for (ReachingDefScope existingDef : defs) { + 
scopedVal: for (Node scopeAssociatedVal : existingDef.defNode.inputs()) { + for (Node input : f.inputs()) { + if (visitInputsUntil(scopeAssociatedVal, input)) { + cacheAccessPosition(existingDef, (ValueNode) scopeAssociatedVal, f); + continue scopedVal; + } + } + } + } + } + } + + private void cacheAccessPosition(ReachingDefScope existingDef, ValueNode scopeAssociatedVal, FixedNode f) { + List existingAccesses = nodeAccesses.get(f); + if (existingAccesses == null) { + existingAccesses = new ArrayList<>(); + } + existingAccesses.add(new ScopedMemoryAccess(scopeAssociatedVal, existingDef)); + nodeAccesses.put(f, existingAccesses); + } + + private void cacheSafepointPosition(ReachingDefScope existingDef, ValueNode scopeAssociatedVal, FixedNode f) { + List existingAccesses = nodeAccesses.get(f); + if (existingAccesses == null) { + existingAccesses = new ArrayList<>(); + nodeAccesses.put(f, existingAccesses); + } + existingAccesses.add(new ScopedSafepoint(scopeAssociatedVal, existingDef.defNode)); + } + + /** + * Special methods known to never access a memory arena. Normally part of the path that + * checks the `checkValidStateRaw` and throws an exception otherwise. 
+ */ + private boolean calleeMightUseArena(ResolvedJavaMethod targetMethod) { + if (Uninterruptible.Utils.isUninterruptible(targetMethod)) { + // Uninterruptible can never safepoint + return false; + } + if (ForeignFunctionsFeature.singleton().getNeverAccessesSharedArena().contains(((HostedType) targetMethod.getDeclaringClass()).getWrapped())) { + return false; + } + return !ForeignFunctionsFeature.singleton().getNeverAccessesSharedArenaMethods().contains(((HostedMethod) targetMethod).getWrapped()); + } + + private static boolean visitInputsUntil(Node key, Node start) { + NodeStack toProcess = new NodeStack(); + toProcess.push(start); + NodeBitMap visited = start.graph().createNodeBitMap(); + while (!toProcess.isEmpty()) { + Node cur = toProcess.pop(); + if (visited.isMarked(cur)) { + continue; + } + visited.mark(cur); + // fixed nodes are checked themselves + if (cur instanceof FixedNode) { + continue; + } + if (cur == key) { + return true; + } + for (Node input : cur.inputs()) { + toProcess.push(input); + } + } + return false; + } + + private EconomicSet> visitEveryLoopHeaderInBetween(ReachingDefScope def, FixedNode usage) { + FixedNode cur = (FixedNode) usage.predecessor(); + NodeBitMap processedNodes = cfg.graph.createNodeBitMap(); + EconomicSet> loopsToCheck = null; + outer: while (cur != def.defNode && cur != null) { + HIRBlock currentBlock = cfg.blockFor(cur); + for (FixedNode f : GraphUtil.predecessorIterable(cur)) { + if (processedNodes.isMarked(f)) { + continue; + } + /* + * We are processing a loop: if usage is inside a loop or a path that goes + * over the body of a loop, any path in that loop might cause a state node + * to be executed that references the scoped session, thus, if we go over a + * loop, ensure to process all nodes of that loop body as well. 
+ */ + CFGLoop cfgLoop = cfg.blockFor(f).getLoop(); + if (cfgLoop != null) { + if (loopsToCheck == null) { + loopsToCheck = EconomicSet.create(); + } + loopsToCheck.add(cfgLoop); + } + if (f == def.defNode) { + break outer; + } + if (f == currentBlock.getBeginNode()) { + // go to the next one + cur = currentBlock.getDominator().getEndNode(); + continue outer; + } + // only mark non block boundary nodes to avoid redoing any block boundary + // iterations + processedNodes.mark(f); + } + } + return loopsToCheck; + } + + @Override + public void exit(HIRBlock b, Integer pushedForBlock) { + for (int i = 0; i < pushedForBlock; i++) { + actions.pop().run(); + } + } + + }; + + cfg.visitDominatorTreeDefault(visitor); + return nodeAccesses.size() > 0 ? nodeAccesses : null; + } + + private static void duplicateCheckValidStateRaw(StructuredGraph graph, ReachingDefScope def, FixedNode fixedUsage, EconomicSet calls) { + SessionStateRawGraphCluster cluster = SessionStateRawGraphCluster.build(def.defNode); + + if (cluster == null) { + // do nothing, the session is probably null, all good + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Aborting %s with usage %s because cluster is null", def, fixedUsage); + return; + } + + Iterator it = calls.iterator(); + while (it.hasNext()) { + if (cluster.clusterNodes.contains(it.next().invoke.asNode())) { + /* + * Normally we do not allow any non-inlined invokes in scope annotated methods + * because we cannot guarantee then that a session is not accessed in the callee. + * For the calls however in the exception handling portion of a failed + * isSessionValid check we are only calling things we control and those callees are + * safe to not inline. 
+ */ + it.remove(); + } + + } + + if (cluster.clusterNodes.isMarked(fixedUsage)) { + // naturally the cluster references itself + return; + } + + if (DUMP_LARGE_NODE_SETS) { + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "Before duplicating cluster at %s %s with cluster nodes %s", def.defNode, fixedUsage, cluster.clusterNodes); + } + EconomicMap duplicates = graph.addDuplicates(cluster.clusterNodes, graph, cluster.clusterNodes.count(), null, false); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After duplicating cluster at %s %s", def.defNode, fixedUsage); + + if (LOG_SHARED_ARENAS) { + TTY.printf("Duplication map %s%n", duplicates); + } + + FixedWithNextNode exceptionPath = cluster.clusterExceptionEnd; + FixedNode exceptionPathNext = exceptionPath.next(); + MergeNode mergeException = graph.add(new MergeNode()); + EndNode exceptionPathNewEnd = graph.add(new EndNode()); + EndNode exceptionPathOldEnd = graph.add(new EndNode()); + + ExceptionPathNode epn = cluster.clusterExceptionEnd; + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "Before creating new exception phi for old val %s", epn); + + exceptionPath.setNext(null); + exceptionPath.setNext(exceptionPathOldEnd); + mergeException.addForwardEnd(exceptionPathOldEnd); + mergeException.addForwardEnd(exceptionPathNewEnd); + mergeException.setNext(exceptionPathNext); + + ((FixedWithNextNode) duplicates.get(cluster.clusterExceptionEnd)).setNext(exceptionPathNewEnd); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After merging exception CF path in %s", mergeException); + + // now add the entire section into the control flow before the actual node with state + assert fixedUsage instanceof FixedWithNextNode : Assertions.errorMessage("Must be a fixed node we can hang a check into", fixedUsage); + + FixedWithNextNode insertPoint = (FixedWithNextNode) fixedUsage.asFixedNode(); + FixedNode insertPointNext = insertPoint.next(); + insertPoint.setNext(null); 
+ FixedNode duplicateClusterEntry = (FixedNode) duplicates.get(cluster.firstClusterNode); + insertPoint.setNext(duplicateClusterEntry); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After hanging check into regular CF %s", insertPoint); + Node nonExceptionEndDuplicate = duplicates.get(cluster.clusterNonExceptionEnd); + ((FixedWithNextNode) nonExceptionEndDuplicate).setNext(insertPointNext); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After going back into after check for regular cf %s", insertPointNext); + + EconomicMap createdPhis = EconomicMap.create(Equivalence.DEFAULT); + for (Node clusterNode : cluster.clusterNodes) { + List clusterNodeUsages = clusterNode.usages().snapshot(); + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Processing cluster node %s with usages %s", clusterNode, clusterNodeUsages); + for (Node usage : clusterNodeUsages) { + if (!cluster.clusterNodes.isMarked(usage)) { + for (Position p : usage.inputPositions()) { + Node input = p.get(usage); + if (input == clusterNode) { + PhiKey key = new PhiKey(input, p.getInputType()); + PhiNode phi = useOrCreatePhi(graph, input, p, createdPhis, cluster.clusterNodes, mergeException, key, usage, duplicates); + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Usage %s of %s outside of cluster", usage, input); + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Before setting input %s of %s to %s", input, usage, phi); + p.set(usage, phi); + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Setting input %s of %s to %s", input, usage, phi); + } + } + } + } + } + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After creating phis at %s", mergeException); + + // delete all cluster nodes to ensure the Arena node has only the expected usages + for (Node n : duplicates.getValues()) { + if (n instanceof ClusterNode c) { + c.delete(); + } + } + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After deleting leftover clusters"); + } + + 
private static PhiNode useOrCreatePhi(StructuredGraph graph, Node input, Position p, EconomicMap createdPhis, NodeBitMap clusterNodes, MergeNode merge, PhiKey key, Node usage, + EconomicMap duplicates) { + PhiNode phi = createdPhis.get(key); + if (phi == null) { + phi = switch (p.getInputType()) { + case Value -> + graph.addWithoutUnique(new ValuePhiNode(((ValueNode) input).stamp(NodeView.DEFAULT).unrestricted(), merge)); + case Memory -> graph.addWithoutUnique(new MemoryPhiNode(merge, getLocationIdentity(input))); + case Guard -> graph.addWithoutUnique(new GuardPhiNode(merge)); + default -> throw GraalError.shouldNotReachHere( + String.format("Unexpected edge type %s from %s to %s, [cluster nodes %s]", p.getInputType(), input, usage, clusterNodes)); // ExcludeFromJacocoGeneratedReport + }; + createdPhis.put(key, phi); + phi.addInput((ValueNode) input); + phi.addInput((ValueNode) duplicates.get(input)); + graph.getDebug().dump(VERY_DETAILED_LEVEL, graph, "Created phi %s for original cluster node %s", phi, input); + } + return phi; + } + + private static LocationIdentity getLocationIdentity(Node node) { + if (node instanceof MemoryPhiNode) { + return ((MemoryPhiNode) node).getLocationIdentity(); + } else if (node instanceof MemoryAccess) { + return ((MemoryAccess) node).getLocationIdentity(); + } else if (MemoryKill.isSingleMemoryKill(node)) { + return ((SingleMemoryKill) node).getKilledLocationIdentity(); + } else if (MemoryKill.isMultiMemoryKill(node)) { + return LocationIdentity.any(); + } else { + throw GraalError.shouldNotReachHere("unexpected node as part of memory graph: " + node); // ExcludeFromJacocoGeneratedReport + } + } + + private static class PhiKey { + Node input; + InputType type; + + PhiKey(Node input, InputType type) { + super(); + this.input = input; + this.type = type; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof PhiKey) { + return input == ((PhiKey) obj).input && type == ((PhiKey) obj).type; + } + return false; + 
} + + @Override + public int hashCode() { + return input.hashCode() * type.hashCode(); + } + } + +} diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java index 76ca149fd867..5f6e5f986b3e 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java @@ -1603,6 +1603,11 @@ private static Suites modifySuites(SubstrateBackend backend, Suites suites, Feat lowTier.replacePlaceholder(AddressLoweringPhase.class, addressLoweringPhase); lowTier.replacePlaceholder(TransplantGraphsPhase.class, new TransplantGraphsPhase(createSuitesForLateSnippetTemplate(suites))); + if (hosted && SharedArenaSupport.isAvailable()) { + var pos = midTier.findPhase(FrameStateAssignmentPhase.class, true); + pos.add(SharedArenaSupport.singleton().createOptimizeSharedArenaAccessPhase()); + } + /* * Graal inserts only loop safepoints. We want a SafepointNode also before every return. Our * safepoint insertion phase inserts both kinds of safepoints. diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SharedArenaSupport.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SharedArenaSupport.java new file mode 100644 index 000000000000..41105b127a1c --- /dev/null +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SharedArenaSupport.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.hosted; + +import java.lang.annotation.Annotation; + +import org.graalvm.nativeimage.AnnotationAccess; +import org.graalvm.nativeimage.ImageSingletons; + +import com.oracle.graal.pointsto.infrastructure.OriginalMethodProvider; +import com.oracle.graal.pointsto.meta.AnalysisMetaAccess; +import com.oracle.svm.util.ReflectionUtil; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.phases.BasePhase; +import jdk.graal.compiler.phases.tiers.MidTierContext; +import jdk.vm.ci.meta.ResolvedJavaMethod; + +public interface SharedArenaSupport { + + @SuppressWarnings("unchecked") // + Class SCOPED_ANNOTATION = // + (Class) ReflectionUtil.lookupClass("jdk.internal.misc.ScopedMemoryAccess$Scoped"); + + @Fold + static boolean isAvailable() { + return ImageSingletons.contains(SharedArenaSupport.class); + } + + @Fold + static SharedArenaSupport singleton() { + return ImageSingletons.lookup(SharedArenaSupport.class); + } + + BasePhase createOptimizeSharedArenaAccessPhase(); + + void registerSafeArenaAccessorClass(AnalysisMetaAccess metaAccess, Class klass); + + 
static boolean isScopedMethod(ResolvedJavaMethod method) { + ResolvedJavaMethod originalMethod = OriginalMethodProvider.getOriginalMethod(method); + return originalMethod != null && AnnotationAccess.isAnnotationPresent(originalMethod, SCOPED_ANNOTATION); + } +} diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/InlineBeforeAnalysisPolicyUtils.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/InlineBeforeAnalysisPolicyUtils.java index ccc6bbcfd487..fb7144860545 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/InlineBeforeAnalysisPolicyUtils.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/InlineBeforeAnalysisPolicyUtils.java @@ -34,6 +34,8 @@ import com.oracle.graal.pointsto.meta.AnalysisMetaAccess; import com.oracle.graal.pointsto.meta.AnalysisMethod; import com.oracle.graal.pointsto.phases.InlineBeforeAnalysisPolicy; +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.heap.RestrictHeapAccess; import com.oracle.svm.core.option.HostedOptionKey; @@ -41,6 +43,7 @@ import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.ReachabilityRegistrationNode; import com.oracle.svm.hosted.SVMHost; +import com.oracle.svm.hosted.SharedArenaSupport; import com.oracle.svm.hosted.code.FactoryMethodSupport; import com.oracle.svm.hosted.methodhandles.MethodHandleInvokerRenamingSubstitutionProcessor; import com.oracle.svm.util.ReflectionUtil; @@ -123,6 +126,12 @@ public static class Options { @Option(help = "Maximum number of invokes for constructors inlined into factory methods before static analysis")// public static final HostedOptionKey InlineBeforeAnalysisConstructorAllowedInvokes = new HostedOptionKey<>(50); + + @Option(help = "Maximum number of computation nodes for methods inlined into scoped methods before static analysis")// + 
public static final HostedOptionKey InlineBeforeAnalysisScopedAllowedNodes = new HostedOptionKey<>(1_000); + + @Option(help = "Maximum number of invokes for methods inlined into scoped methods before static analysis")// + public static final HostedOptionKey InlineBeforeAnalysisScopedAllowedInvokes = new HostedOptionKey<>(50); } /* Cached values of options, to avoid repeated option lookup. */ @@ -138,15 +147,23 @@ public static class Options { public final boolean optionTrackNeverNullInstanceFields = PointstoOptions.TrackNeverNullInstanceFields.getValue(HostedOptionValues.singleton()); public final int optionConstructorAllowedNodes = Options.InlineBeforeAnalysisConstructorAllowedNodes.getValue(); public final int optionConstructorAllowedInvokes = Options.InlineBeforeAnalysisConstructorAllowedInvokes.getValue(); + public final int optionScopedAllowedNodes = Options.InlineBeforeAnalysisScopedAllowedNodes.getValue(); + public final int optionScopedAllowedInvokes = Options.InlineBeforeAnalysisScopedAllowedInvokes.getValue(); + + public final boolean optionForeignAPISupport = SubstrateOptions.ForeignAPISupport.getValue(); @SuppressWarnings("unchecked") // private static final Class COMPILED_LAMBDA_FORM_ANNOTATION = // - (Class) ReflectionUtil.lookupClass(false, "java.lang.invoke.LambdaForm$Compiled"); + (Class) ReflectionUtil.lookupClass("java.lang.invoke.LambdaForm$Compiled"); public static boolean isMethodHandleIntrinsificationRoot(ResolvedJavaMethod method) { return AnnotationAccess.isAnnotationPresent(method, COMPILED_LAMBDA_FORM_ANNOTATION); } + public boolean isScopedMethod(ResolvedJavaMethod method) { + return optionForeignAPISupport && SharedArenaSupport.isScopedMethod(method); + } + public boolean shouldInlineInvoke(GraphBuilderContext b, SVMHost hostVM, AccumulativeInlineScope policyScope, AnalysisMethod method) { boolean result = shouldInlineInvoke0(b, hostVM, policyScope, method); if (result && policyScope != null) { @@ -167,7 +184,18 @@ private boolean 
shouldInlineInvoke0(GraphBuilderContext b, SVMHost hostVM, Accum return true; } - boolean inMethodHandleIntrinsification = policyScope != null && policyScope.accumulativeCounters.inMethodHandleIntrinsification; + /* + * Calls to methods annotated with @AlwaysInline or @ForceInline should not be inlined if + * the current method is a scoped method. The inlining of callees with such annotations is + * left to later phases. Here, we just want to inline callees which won't be inlined by + * other phases. + */ + if (isScopedMethod(b.getMethod()) && + (AnnotationAccess.isAnnotationPresent(method, AlwaysInline.class) || AnnotationAccess.isAnnotationPresent(method, ForceInline.class))) { + return false; + } + + boolean inMethodHandleIntrinsification = policyScope != null && policyScope.accumulativeCounters.inMethodHandleIntrinsification(); int allowedInlinings = inMethodHandleIntrinsification ? optionMethodHandleAllowedInlinings : optionAllowedInlinings; if (policyScope != null && policyScope.accumulativeCounters.totalInlinedMethods >= allowedInlinings) { return false; @@ -250,21 +278,38 @@ public boolean alwaysInlineInvoke(@SuppressWarnings("unused") AnalysisMetaAccess return false; } + enum InliningScopeType { + None, + MethodHandleIntrinsification, + ConstructorInlining, + ScopedMethod + } + static final class AccumulativeCounters { int maxNodes; int maxInvokes; - final boolean inMethodHandleIntrinsification; - final boolean inConstructorInlining; + final InliningScopeType inliningScopeType; int numNodes; int numInvokes; int totalInlinedMethods; - private AccumulativeCounters(int maxNodes, int maxInvokes, boolean inMethodHandleIntrinsification, boolean inConstructorInlining) { + private AccumulativeCounters(int maxNodes, int maxInvokes, InliningScopeType inliningScopeType) { this.maxNodes = maxNodes; this.maxInvokes = maxInvokes; - this.inMethodHandleIntrinsification = inMethodHandleIntrinsification; - this.inConstructorInlining = inConstructorInlining; + 
this.inliningScopeType = inliningScopeType; + } + + public boolean inMethodHandleIntrinsification() { + return inliningScopeType == InliningScopeType.MethodHandleIntrinsification; + } + + public boolean inConstructorInlining() { + return inliningScopeType == InliningScopeType.ConstructorInlining; + } + + public boolean inAnyInliningScope() { + return inliningScopeType != InliningScopeType.None; } } @@ -289,16 +334,19 @@ public AccumulativeInlineScope createAccumulativeInlineScope(AccumulativeInlineS * permit more types of nodes, but not recursively, i.e., not if we are already in a * method handle intrinsification context. */ - accumulativeCounters = new AccumulativeCounters(optionMethodHandleAllowedNodes, optionMethodHandleAllowedInvokes, true, false); + accumulativeCounters = new AccumulativeCounters(optionMethodHandleAllowedNodes, optionMethodHandleAllowedInvokes, InliningScopeType.MethodHandleIntrinsification); } else if (optionTrackNeverNullInstanceFields && FactoryMethodSupport.isFactoryMethod(caller)) { - accumulativeCounters = new AccumulativeCounters(optionConstructorAllowedNodes, optionConstructorAllowedInvokes, false, true); + accumulativeCounters = new AccumulativeCounters(optionConstructorAllowedNodes, optionConstructorAllowedInvokes, InliningScopeType.ConstructorInlining); + + } else if (isScopedMethod(caller)) { + accumulativeCounters = new AccumulativeCounters(optionScopedAllowedNodes, optionScopedAllowedInvokes, InliningScopeType.ScopedMethod); } else { - accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, false, false); + accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, InliningScopeType.None); } - } else if (outer.accumulativeCounters.inMethodHandleIntrinsification && !inlineForMethodHandleIntrinsification(method)) { + } else if (outer.accumulativeCounters.inMethodHandleIntrinsification() && !inlineForMethodHandleIntrinsification(method)) { /* * Method which is invoked 
in method handle intrinsification but which is not part of * the method handle apparatus, for example, the target method of a direct method @@ -313,11 +361,11 @@ public AccumulativeInlineScope createAccumulativeInlineScope(AccumulativeInlineS * inlining root. */ depth = 1; - accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, false, false); + accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, InliningScopeType.None); - } else if (outer.accumulativeCounters.inConstructorInlining && !method.isConstructor()) { + } else if (outer.accumulativeCounters.inConstructorInlining() && !method.isConstructor()) { depth = 1; - accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, false, false); + accumulativeCounters = new AccumulativeCounters(optionAllowedNodes, optionAllowedInvokes, InliningScopeType.None); } else { /* Nested inlining (potentially during method handle intrinsification). */ @@ -475,8 +523,11 @@ public boolean processNode(AnalysisMetaAccess metaAccess, AnalysisMethod method, numNodes++; accumulativeCounters.numNodes++; - // With method handle intrinsification we permit all node types to become more effective - return allow || accumulativeCounters.inMethodHandleIntrinsification || accumulativeCounters.inConstructorInlining; + /* + * During inlining (i.e. method handle intrinsification, constructor/scoped method + * inlining), we permit all node types to become more effective. 
+ */ + return allow || accumulativeCounters.inAnyInliningScope(); } @Override diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/SharedGraphBuilderPhase.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/SharedGraphBuilderPhase.java index dd317fc8c547..9f9f476246ca 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/SharedGraphBuilderPhase.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/phases/SharedGraphBuilderPhase.java @@ -27,6 +27,7 @@ import static com.oracle.svm.core.SubstrateUtil.toUnboxedClass; import static jdk.graal.compiler.bytecode.Bytecodes.LDC2_W; +import java.lang.annotation.Annotation; import java.lang.constant.ConstantDescs; import java.lang.invoke.LambdaConversionException; import java.lang.invoke.MethodHandles; @@ -36,8 +37,12 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Objects; + +import org.graalvm.nativeimage.AnnotationAccess; import com.oracle.graal.pointsto.constraints.TypeInstantiationException; import com.oracle.graal.pointsto.constraints.UnresolvedElementException; @@ -48,6 +53,7 @@ import com.oracle.graal.pointsto.meta.AnalysisField; import com.oracle.graal.pointsto.meta.AnalysisMetaAccess; import com.oracle.graal.pointsto.meta.AnalysisMethod; +import com.oracle.graal.pointsto.meta.AnalysisType; import com.oracle.svm.common.meta.MultiMethod; import com.oracle.svm.core.bootstrap.BootstrapMethodConfiguration; import com.oracle.svm.core.bootstrap.BootstrapMethodConfiguration.BootstrapMethodRecord; @@ -61,12 +67,14 @@ import com.oracle.svm.core.graal.nodes.FieldOffsetNode; import com.oracle.svm.core.graal.nodes.LoweredDeadEndNode; import com.oracle.svm.core.nodes.SubstrateMethodCallTargetNode; +import com.oracle.svm.core.nodes.foreign.ScopedMethodNode; import 
com.oracle.svm.core.snippets.SnippetRuntime; import com.oracle.svm.core.util.UserError; import com.oracle.svm.core.util.UserError.UserException; import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.ExceptionSynthesizer; import com.oracle.svm.hosted.LinkAtBuildTimeSupport; +import com.oracle.svm.hosted.SharedArenaSupport; import com.oracle.svm.hosted.code.FactoryMethodSupport; import com.oracle.svm.hosted.code.SubstrateCompilationDirectives; import com.oracle.svm.hosted.nodes.DeoptProxyNode; @@ -76,10 +84,14 @@ import jdk.graal.compiler.api.replacements.Fold; import jdk.graal.compiler.core.common.calc.Condition; import jdk.graal.compiler.core.common.memory.MemoryOrderMode; +import jdk.graal.compiler.core.common.type.Stamp; import jdk.graal.compiler.core.common.type.StampFactory; import jdk.graal.compiler.core.common.type.StampPair; import jdk.graal.compiler.core.common.type.TypeReference; +import jdk.graal.compiler.debug.Assertions; +import jdk.graal.compiler.debug.DebugContext; import jdk.graal.compiler.debug.GraalError; +import jdk.graal.compiler.graph.Node; import jdk.graal.compiler.graph.Node.NodeIntrinsic; import jdk.graal.compiler.java.BciBlockMapping; import jdk.graal.compiler.java.BciBlockMapping.BciBlock; @@ -91,6 +103,7 @@ import jdk.graal.compiler.nodes.CallTargetNode; import jdk.graal.compiler.nodes.CallTargetNode.InvokeKind; import jdk.graal.compiler.nodes.ConstantNode; +import jdk.graal.compiler.nodes.ControlSinkNode; import jdk.graal.compiler.nodes.EndNode; import jdk.graal.compiler.nodes.FieldLocationIdentity; import jdk.graal.compiler.nodes.FixedNode; @@ -102,11 +115,13 @@ import jdk.graal.compiler.nodes.LogicConstantNode; import jdk.graal.compiler.nodes.LogicNode; import jdk.graal.compiler.nodes.MergeNode; +import jdk.graal.compiler.nodes.NodeView; import jdk.graal.compiler.nodes.StateSplit; import jdk.graal.compiler.nodes.StructuredGraph; import jdk.graal.compiler.nodes.UnreachableBeginNode; import 
jdk.graal.compiler.nodes.UnreachableControlSinkNode; import jdk.graal.compiler.nodes.UnreachableNode; +import jdk.graal.compiler.nodes.UnwindNode; import jdk.graal.compiler.nodes.ValueNode; import jdk.graal.compiler.nodes.ValuePhiNode; import jdk.graal.compiler.nodes.calc.IsNullNode; @@ -132,6 +147,7 @@ import jdk.graal.compiler.phases.OptimisticOptimizations; import jdk.graal.compiler.replacements.SnippetTemplate; import jdk.internal.access.SharedSecrets; +import jdk.internal.foreign.MemorySessionImpl; import jdk.vm.ci.meta.ConstantPool.BootstrapMethodInvocation; import jdk.vm.ci.meta.JavaConstant; import jdk.vm.ci.meta.JavaField; @@ -139,9 +155,11 @@ import jdk.vm.ci.meta.JavaMethod; import jdk.vm.ci.meta.JavaType; import jdk.vm.ci.meta.JavaTypeProfile; +import jdk.vm.ci.meta.MetaAccessProvider; import jdk.vm.ci.meta.PrimitiveConstant; import jdk.vm.ci.meta.ResolvedJavaField; import jdk.vm.ci.meta.ResolvedJavaMethod; +import jdk.vm.ci.meta.ResolvedJavaMethod.Parameter; import jdk.vm.ci.meta.ResolvedJavaType; import jdk.vm.ci.meta.UnresolvedJavaType; @@ -159,6 +177,21 @@ protected void run(StructuredGraph graph) { public abstract static class SharedBytecodeParser extends BytecodeParser { + private static final Class SCOPED_SUBSTRATE_ANNOTATION; + private static final Executable SESSION_EXCEPTION_HANDLER_METHOD; + private static final Class MAPPED_MEMORY_UTILS_PROXY_CLASS; + + static { + SCOPED_SUBSTRATE_ANNOTATION = ReflectionUtil.lookupClass(true, "com.oracle.svm.core.foreign.Target_jdk_internal_misc_ScopedMemoryAccess_Scoped"); + Class substrateForeignUtilClass = ReflectionUtil.lookupClass(true, "com.oracle.svm.core.foreign.SubstrateForeignUtil"); + SESSION_EXCEPTION_HANDLER_METHOD = substrateForeignUtilClass != null + ? 
ReflectionUtil.lookupMethod(substrateForeignUtilClass, "sessionExceptionHandler", MemorySessionImpl.class, Object.class, long.class) + : null; + MAPPED_MEMORY_UTILS_PROXY_CLASS = ReflectionUtil.lookupClass(true, "jdk.internal.access.foreign.MappedMemoryUtilsProxy"); + } + + protected List scopedMemorySessions; + private int currentDeoptIndex; private final boolean explicitExceptionEdges; @@ -195,6 +228,7 @@ protected boolean shouldVerifyFrameStates() { return false; } + @SuppressWarnings("unchecked") @Override protected void build(FixedWithNextNode startInstruction, FrameStateBuilder startFrameState) { if (!shouldVerifyFrameStates()) { @@ -211,6 +245,261 @@ protected void build(FixedWithNextNode startInstruction, FrameStateBuilder start assert deoptProxy.hasProxyPoint(); } } + + if (SCOPED_SUBSTRATE_ANNOTATION != null && SharedArenaSupport.SCOPED_ANNOTATION != null && graph.method() != null) { + try { + if (AnnotationAccess.isAnnotationPresent(method, (Class) SCOPED_SUBSTRATE_ANNOTATION) && SharedArenaSupport.isAvailable()) { + // substituted, only add the scoped node + introduceScopeNodes(); + } + if (AnnotationAccess.isAnnotationPresent(method, SharedArenaSupport.SCOPED_ANNOTATION) && SharedArenaSupport.isAvailable()) { + // not substituted, also instrument + instrumentScopedMethod(); + } + } catch (Throwable e) { + throw GraalError.shouldNotReachHere(e); + } + } + } + + /** + * Adds necessary instrumentation for scoped memory accesses. This includes an additional + * exception handler per session argument that can be used to later create validity checks + * per memory session. + */ + private void instrumentScopedMethod() { + introduceScopeInstrumentationExceptionHandlers(); + introduceScopeNodes(); + } + + /** + * Takes a given {@code @Scope} annotated method using session arguments and introduces an + * exception handler per session that can be later duplicated. 
Code like + * {@link jdk.internal.misc.ScopedMemoryAccess#getByte(MemorySessionImpl, Object, long)} + * which calls the {@code getByteInternal} method + * + *
+         * {@code @Scoped}
+         * private byte getByteInternal(MemorySessionImpl session, Object base, long offset) {
+         *     try {
+         *         if (session != null) {
+         *             session.checkValidStateRaw();
+         *         }
+         *         return UNSAFE.getByte(base, offset);
+         *     } finally {
+         *         Reference.reachabilityFence(session);
+         *     }
+         * }
+         * 
+ *

+ * + * is transformed into + * + *

+         * {@code @Scoped}
+         * private byte getByteInternal(MemorySessionImpl session, Object base, long offset) {
+         *     try {
+         *         SubstrateForeignUtil.sessionExceptionHandler(); // can also throw and the
+         *         // exception handlers are merged
+         *         if (session != null) {
+         *             session.checkValidStateRaw();
+         *         }
+         *         return UNSAFE.getByte(base, offset);
+         *     } finally {
+         *         Reference.reachabilityFence(session);
+         *     }
+         * }
+         * 
+ */ + private void introduceScopeInstrumentationExceptionHandlers() { + ResolvedJavaMethod sessionCheckMethod = getMetaAccess().lookupJavaMethod(SESSION_EXCEPTION_HANDLER_METHOD); + + assert sessionCheckMethod != null; + List sessionsToCheck = getSessionArguments(method, graph, getMetaAccess()); + List unwinds = graph.getNodes(UnwindNode.TYPE).snapshot(); + // When doing a hosted compile of the scoped memory access methods, every method must have an + // exception handler or exception path unwinding to the caller. There must always be + // exactly ONE such path. + GraalError.guarantee(unwinds.size() == 1, "Exactly one unwind node expected."); + + final UnwindNode unwind = unwinds.get(0); + FrameState unwindMergeStateTemplate = null; + if (unwind.predecessor() instanceof MergeNode m) { + unwindMergeStateTemplate = m.stateAfter().duplicateWithVirtualState(); + } + + for (SessionCheck sessionCheck : sessionsToCheck) { + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "Before inserting exception handlers for scoped unwind paths"); + assert sessionCheck.session != null : Assertions.errorMessage("At least the session must never be null", sessionsToCheck); + ValueNode[] args = new ValueNode[]{sessionCheck.session, sessionCheck.base == null ? ConstantNode.defaultForKind(JavaKind.Object, graph) : sessionCheck.base, + sessionCheck.offset == null ? 
ConstantNode.defaultForKind(JavaKind.Long, graph) : sessionCheck.offset}; + MethodCallTargetNode mct = graph.addWithoutUnique(new MethodCallTargetNode(InvokeKind.Static, sessionCheckMethod, args, StampPair.createSingle(StampFactory.forVoid()), null)); + + ResolvedJavaType tt = getMetaAccess().lookupJavaType(Throwable.class); + assert tt != null; + Stamp s = StampFactory.objectNonNull(TypeReference.createTrustedWithoutAssumptions(tt)); + ExceptionObjectNode eon = graph.add(new ExceptionObjectNode(s)); + GraalError.guarantee(eon.stamp(NodeView.DEFAULT) != null, "Must have a stamp %s", eon); + + /* + * Build a fake state for the exception handler that is not existing in the + * bytecode. This is fine because we will never deopt here. Note that this is a real + * exception state with a bci, and not only one used for rethrowing. + */ + eon.setStateAfter(graph.addOrUnique(new FrameState(0, eon, graph.start().stateAfter().getCode(), false))); + + /* a random bci 0, we are injecting an artificial call */ + final int callBCI = 0; + InvokeWithExceptionNode invoke = graph.add(new InvokeWithExceptionNode(mct, eon, callBCI)); + invoke.setStateAfter(graph.start().stateAfter().duplicateWithVirtualState()); + invoke.stateAfter().invalidateForDeoptimization(); + + // hang the invoke in + FixedNode afterStart = graph.start().next(); + graph.start().setNext(null); + invoke.setNext(afterStart); + graph.start().setNext(invoke); + + // hang exception handlers in + MergeNode newMergeBeforeUnwind = graph.add(new MergeNode()); + EndNode oldUnwindEnd = graph.add(new EndNode()); + EndNode newUnwindEnd = graph.add(new EndNode()); + + // connect exception object to new end + eon.setNext(newUnwindEnd); + + FixedWithNextNode beforeUnwind = (FixedWithNextNode) unwind.predecessor(); + beforeUnwind.setNext(null); + beforeUnwind.setNext(oldUnwindEnd); + + newMergeBeforeUnwind.setNext(unwind); + newMergeBeforeUnwind.addForwardEnd(oldUnwindEnd); + newMergeBeforeUnwind.addForwardEnd(newUnwindEnd); + + 
ValuePhiNode eonPhi = graph.addWithoutUnique(new ValuePhiNode(unwind.exception().stamp(NodeView.DEFAULT).unrestricted(), newMergeBeforeUnwind)); + eonPhi.addInput(unwind.exception()); + eonPhi.addInput(eon); + + assert Objects.requireNonNull(unwindMergeStateTemplate).values().size() == 1 : Assertions.errorMessage("Exception path should only have exception object on stack", + unwindMergeStateTemplate); + + unwindMergeStateTemplate.replaceFirstInput(unwind.exception(), eonPhi); + newMergeBeforeUnwind.setStateAfter(unwindMergeStateTemplate); + + // duplicate for next occurrence + unwindMergeStateTemplate = unwindMergeStateTemplate.duplicateWithVirtualState(); + unwind.setException(eonPhi); + graph.getDebug().dump(DebugContext.VERY_DETAILED_LEVEL, graph, "After inserting exception handlers for scoped unwind paths %s", invoke); + } + } + + /** + * Represents a memory session triplet in {@code @Scoped} annotated methods. A scoped memory + * access normally consists of the session object, a base object and an offset. + */ + record SessionCheck(ValueNode session, + ValueNode base, + ValueNode offset) { + } + + /** + * Computes any arguments related to memory session checks in a scoped method. + *

+ * We are looking for 2 kinds of patterns here + *

+ * 1: parameters starting with {@code MemorySessionImpl session, Object o, long offset} + * followed by more we are not interested in + *

+ * and + *

+ * 2: parameters starting with 2 sessions + * {@code MemorySessionImpl aSession, MemorySessionImpl bSession, Object a, long aOffset, Object b, long bOffset} + * followed by more we are not interested in. + */ + private static List getSessionArguments(ResolvedJavaMethod method, StructuredGraph graph, MetaAccessProvider metaAccess) { + assert method != null; + final ResolvedJavaType sessionType = ((AnalysisType) metaAccess.lookupJavaType(MemorySessionImpl.class)).getWrapped(); + assert sessionType != null; + final ResolvedJavaType baseType = ((AnalysisType) metaAccess.lookupJavaType(Object.class)).getWrapped(); + assert baseType != null; + final ResolvedJavaType offsetType = ((AnalysisType) metaAccess.lookupJavaType(long.class)).getWrapped(); + assert offsetType != null; + final ResolvedJavaType utilsType = ((AnalysisType) metaAccess.lookupJavaType(MAPPED_MEMORY_UTILS_PROXY_CLASS)).getWrapped(); + assert utilsType != null; + + Parameter[] p = method.getParameters(); + if (!p[0].getType().equals(sessionType)) { + // no sessions involved + return List.of(); + } + if (p.length < 3) { + // length does not match + return List.of(); + } + + int pIndex = method.hasReceiver() ? 
1 : 0; + if (p[1].getType().equals(utilsType)) { + // eg forceInternal(MemorySessionImpl session, MappedMemoryUtilsProxy mappedUtils, + // FileDescriptor fd, long address, boolean isSync, long index, long length) { + ValueNode session = graph.getParameter(pIndex++); + pIndex++; // skip mappedUtils + pIndex++; // skip fd + ValueNode offset = graph.getParameter(pIndex++); + SessionCheck check = new SessionCheck(session, null, offset); + verifySession(sessionType, baseType, offsetType, check, metaAccess); + return List.of(check); + } else if (p[1].getType().equals(sessionType)) { + // 2 session case + ValueNode s1Session = graph.getParameter(pIndex++); + ValueNode s2Session = graph.getParameter(pIndex++); + ValueNode s1Base = graph.getParameter(pIndex++); + ValueNode s1Offset = graph.getParameter(pIndex++); + ValueNode s2Base = graph.getParameter(pIndex++); + ValueNode s2Offset = graph.getParameter(pIndex++); + SessionCheck s1 = new SessionCheck(s1Session, s1Base, s1Offset); + SessionCheck s2 = new SessionCheck(s2Session, s2Base, s2Offset); + verifySession(sessionType, baseType, offsetType, s1, metaAccess); + verifySession(sessionType, baseType, offsetType, s2, metaAccess); + return List.of(s1, s2); + } else { + // 1 session case + ValueNode session = graph.getParameter(pIndex++); + ValueNode base = graph.getParameter(pIndex++); + ValueNode offset = graph.getParameter(pIndex++); + SessionCheck check = new SessionCheck(session, base, offset); + verifySession(sessionType, baseType, offsetType, check, metaAccess); + return List.of(check); + } + } + + private static void verifySession(ResolvedJavaType sessionType, ResolvedJavaType baseType, ResolvedJavaType offsetType, SessionCheck check, MetaAccessProvider metaAccess) { + GraalError.guarantee(sessionType.isAssignableFrom(((AnalysisType) check.session.stamp(NodeView.DEFAULT).javaType(metaAccess)).getWrapped()), "Session type must match, but is %s", + check.session.stamp(NodeView.DEFAULT)); + if (check.base != null) { + // 
base can be null + GraalError.guarantee(baseType.isAssignableFrom(((AnalysisType) check.base.stamp(NodeView.DEFAULT).javaType(metaAccess)).getWrapped()), "Base type must match, but is %s", + check.base.stamp(NodeView.DEFAULT)); + } + GraalError.guarantee(offsetType.isAssignableFrom(((AnalysisType) check.offset.stamp(NodeView.DEFAULT).javaType(metaAccess)).getWrapped()), "Offset type must match, but is %s", + check.offset.stamp(NodeView.DEFAULT)); + } + + /** + * This method has memory accesses to (potentially shared) memory arenas (project panama). + * In order to properly guarantee there are no unknown calls left in code potentially + * accessing a shared arena, we mark this method's region (start to each sink) with scoped. + *

+ * Later when we expand exception handlers for the shared arena code, we verify this based + * on the scopes created here. + */ + private void introduceScopeNodes() { + ScopedMethodNode startScope = graph.add(new ScopedMethodNode()); + graph.addAfterFixed(graph.start(), startScope); + for (Node n : graph.getNodes()) { + if (n instanceof ControlSinkNode sink) { + graph.addBeforeFixed(sink, graph.add(new ScopedMethodNode(startScope))); + } + } } @Override @@ -232,7 +521,7 @@ private boolean checkWordTypes() { * {@link Fold} and {@link NodeIntrinsic} can be deferred during parsing/decoding. Only by * the end of {@linkplain SnippetTemplate#instantiate Snippet instantiation} do they need to * have been processed. - * + *

* This is how SVM handles snippets. They are parsed with plugins disabled and then encoded * and stored in the image. When the snippet is needed at runtime the graph is decoded and * the plugins are run during the decoding process. If they aren't handled at this point @@ -541,7 +830,7 @@ private void handleUnresolvedMethod(JavaMethod javaMethod) { * JVMCI does not report that information back when method resolution fails. We need to look * down the class hierarchy to see if there would be an appropriate method with a matching * signature which is just not accessible. - * + *

* We do all the method lookups (to search for a method with the same signature as * searchMethod) using reflection and not JVMCI because the lookup can throw all sorts of * errors, and we want to ignore the errors without any possible side effect on AnalysisType @@ -1068,7 +1357,7 @@ private Object loadConstantDynamic(int cpi, int opcode) { * Therefore, we cannot just treat it as "safe at build time". The class * initialization is also completely useless because the invoking class must be * already initialized by the time the boostrap method is executed. - * + * * We replicate the implementation of the bootstrap method here without doing * the class initialization. */ @@ -1510,5 +1799,18 @@ private ResolvedJavaMethod lookupResolvedJavaMethod(Class clazz, String name, } } } + + @Override + public void setIsParsingScopedMemoryMethod(ValueNode scopedMemorySession) { + if (scopedMemorySessions == null) { + scopedMemorySessions = new ArrayList<>(); + } + scopedMemorySessions.add(scopedMemorySession); + } + + @Override + public List getScopedMemorySessions() { + return scopedMemorySessions; + } } } diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/snippets/SubstrateGraphBuilderPlugins.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/snippets/SubstrateGraphBuilderPlugins.java index edc231c5d6ca..e371cedcc9ed 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/snippets/SubstrateGraphBuilderPlugins.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/snippets/SubstrateGraphBuilderPlugins.java @@ -38,8 +38,6 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import java.util.stream.Stream; -import jdk.graal.compiler.core.common.LibGraalSupport; -import jdk.graal.compiler.core.common.NativeImageSupport; import org.graalvm.nativeimage.AnnotationAccess; import org.graalvm.nativeimage.ImageInfo; import org.graalvm.nativeimage.ImageSingletons; @@ -57,6 +55,7 @@ import 
com.oracle.graal.pointsto.AbstractAnalysisEngine; import com.oracle.graal.pointsto.infrastructure.OriginalClassProvider; import com.oracle.graal.pointsto.meta.AnalysisType; +import com.oracle.svm.core.ArenaIntrinsics; import com.oracle.svm.core.FrameAccess; import com.oracle.svm.core.MissingRegistrationSupport; import com.oracle.svm.core.NeverInline; @@ -94,6 +93,7 @@ import com.oracle.svm.core.layeredimagesingleton.LayeredImageSingletonBuilderFlags; import com.oracle.svm.core.layeredimagesingleton.LayeredImageSingletonSupport; import com.oracle.svm.core.layeredimagesingleton.MultiLayeredImageSingleton; +import com.oracle.svm.core.nodes.foreign.MemoryArenaValidInScopeNode; import com.oracle.svm.core.option.HostedOptionKey; import com.oracle.svm.core.snippets.KnownIntrinsics; import com.oracle.svm.core.util.UserError; @@ -107,6 +107,8 @@ import com.oracle.svm.hosted.substitute.AnnotationSubstitutionProcessor; import jdk.graal.compiler.core.common.CompressEncoding; +import jdk.graal.compiler.core.common.LibGraalSupport; +import jdk.graal.compiler.core.common.NativeImageSupport; import jdk.graal.compiler.core.common.type.AbstractObjectStamp; import jdk.graal.compiler.core.common.type.IntegerStamp; import jdk.graal.compiler.core.common.type.StampFactory; @@ -117,6 +119,7 @@ import jdk.graal.compiler.nodes.ComputeObjectAddressNode; import jdk.graal.compiler.nodes.ConstantNode; import jdk.graal.compiler.nodes.DynamicPiNode; +import jdk.graal.compiler.nodes.FieldLocationIdentity; import jdk.graal.compiler.nodes.FixedNode; import jdk.graal.compiler.nodes.FixedWithNextNode; import jdk.graal.compiler.nodes.FullInfopointNode; @@ -165,6 +168,7 @@ import jdk.graal.compiler.replacements.nodes.VectorizedMismatchNode; import jdk.graal.compiler.serviceprovider.JavaVersionUtil; import jdk.graal.compiler.word.WordCastNode; +import jdk.internal.foreign.MemorySessionImpl; import jdk.vm.ci.code.Architecture; import jdk.vm.ci.meta.DeoptimizationAction; import jdk.vm.ci.meta.JavaConstant; 
@@ -195,6 +199,7 @@ public static void registerInvocationPlugins(AnnotationSubstitutionProcessor ann boolean supportsStubBasedPlugins) { // register the substratevm plugins + registerArenaPlugins(plugins); registerSystemPlugins(plugins); registerReflectionPlugins(plugins, replacements); registerImageInfoPlugins(plugins); @@ -218,6 +223,21 @@ public static void registerInvocationPlugins(AnnotationSubstitutionProcessor ann } } + private static void registerArenaPlugins(InvocationPlugins plugins) { + Registration r = new Registration(plugins, ArenaIntrinsics.class); + r.register(new RequiredInlineOnlyInvocationPlugin("checkArenaValidInScope", MemorySessionImpl.class, Object.class, long.class) { + @Override + public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode session, ValueNode ptrOne, ValueNode ptrTwo) { + b.setIsParsingScopedMemoryMethod(session); + MemoryArenaValidInScopeNode result = new MemoryArenaValidInScopeNode(session, new FieldLocationIdentity(b.getMetaAccess().lookupJavaField(MemoryArenaValidInScopeNode.STATE_FIELD))); + b.addPush(JavaKind.Long, result); + result.addScopeAssociatedValue(ptrOne); + result.addScopeAssociatedValue(ptrTwo); + return true; + } + }); + } + private static void registerSerializationPlugins(ImageClassLoader loader, InvocationPlugins plugins, ParsingReason reason) { if (reason.duringAnalysis() && reason != ParsingReason.JITCompilation) { Registration serializationFilter = new Registration(plugins, ObjectInputFilter.Config.class); @@ -529,8 +549,7 @@ static Class[] extractClassArray(GraphBuilderContext b, AnnotationSubstitutio */ return b.getSnippetReflection().asObject(Class[].class, originalArrayNode.asJavaConstant()); - } else if (originalArrayNode instanceof AllocatedObjectNode && StampTool.isAlwaysArray(originalArrayNode)) { - AllocatedObjectNode allocatedObjectNode = (AllocatedObjectNode) originalArrayNode; + } else if (originalArrayNode instanceof AllocatedObjectNode 
allocatedObjectNode && StampTool.isAlwaysArray(originalArrayNode)) { if (!allocatedObjectNode.getVirtualObject().type().equals(b.getMetaAccess().lookupJavaType(Class[].class))) { /* Not allocating a Class[] array. */ return null; @@ -560,13 +579,12 @@ static Class[] extractClassArray(GraphBuilderContext b, AnnotationSubstitutio } throw VMError.shouldNotReachHere("Must have found the virtual object"); - } else if (originalArrayNode instanceof NewArrayNode) { + } else if (originalArrayNode instanceof NewArrayNode newArray) { /* * Find the elements written to the array. If the array length is a constant, all * written elements are constants and all array elements are filled then return the * array elements. */ - NewArrayNode newArray = (NewArrayNode) originalArrayNode; if (!newArray.elementType().equals(b.getMetaAccess().lookupJavaType(Class.class))) { /* Not allocating a Class[] array. */ return null; @@ -588,8 +606,7 @@ static Class[] extractClassArray(GraphBuilderContext b, AnnotationSubstitutio */ Class[] result = new Class[newArrayLength]; FixedNode successor = unwrapNode(newArray.next()); - while (successor instanceof StoreIndexedNode) { - StoreIndexedNode store = (StoreIndexedNode) successor; + while (successor instanceof StoreIndexedNode store) { if (getDeoptProxyOriginalValue(store.array()).equals(newArray)) { if (!store.index().isJavaConstant()) { return null; @@ -863,14 +880,11 @@ private static boolean isValidField(Field targetField, boolean isSunMiscUnsafe) /* A NullPointerException will be thrown at run time for this call. */ return false; } - if (isSunMiscUnsafe && (targetField.getDeclaringClass().isRecord() || targetField.getDeclaringClass().isHidden())) { - /* - * sun.misc.Unsafe performs a few more checks than jdk.internal.misc.Unsafe to - * explicitly disallow hidden classes and records. 
- */ - return false; - } - return true; + /* + * sun.misc.Unsafe performs a few more checks than jdk.internal.misc.Unsafe to explicitly + * disallow hidden classes and records. + */ + return !isSunMiscUnsafe || (!targetField.getDeclaringClass().isRecord() && !targetField.getDeclaringClass().isHidden()); } private static boolean processStaticFieldBase(GraphBuilderContext b, Receiver receiver, Field targetField, boolean isSunMiscUnsafe) {