diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh
index dbda5cad8cfb7..aa3f68ea710bb 100755
--- a/dev-support/bin/hadoop.sh
+++ b/dev-support/bin/hadoop.sh
@@ -582,6 +582,7 @@ function shadedclient_rebuild
   extra=(
     "-Dtest=NoUnitTests"
+    "-Dsurefire.failIfNoSpecifiedTests=false"
     "-Dmaven.javadoc.skip=true"
     "-Dcheckstyle.skip=true"
     "-Dspotbugs.skip=true"
@@ -619,7 +620,7 @@ function shadedclient_rebuild
   echo_and_redirect "${logfile}" \
     "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
     "${modules[@]}" \
-    -DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true \
+    -DskipShade -Dtest=NoUnitTests -Dsurefire.failIfNoSpecifiedTests=false -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true \
     -Dspotbugs.skip=true ${extra[*]}

   count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 30e65efe10cba..59ba698ba1bab 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -19,6 +19,7 @@
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.util.SubjectUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
@@ -35,12 +36,10 @@
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.security.AccessControlContext;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionException;

 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;

@@ -300,8 +299,7 @@ private boolean isNegotiate(HttpURLConnection conn) throws IOException {
   private void doSpnegoSequence(final AuthenticatedURL.Token token)
       throws IOException, AuthenticationException {
     try {
-      AccessControlContext context = AccessController.getContext();
-      Subject subject = Subject.getSubject(context);
+      Subject subject = SubjectUtil.current();
       if (subject == null
           || (!KerberosUtil.hasKerberosKeyTab(subject)
               && !KerberosUtil.hasKerberosTicket(subject))) {
@@ -315,10 +313,10 @@ private void doSpnegoSequence(final AuthenticatedURL.Token token)
       if (LOG.isDebugEnabled()) {
         LOG.debug("Using subject: " + subject);
       }
-      Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
+      SubjectUtil.callAs(subject, new Callable<Void>() {

         @Override
-        public Void run() throws Exception {
+        public Void call() throws Exception {
           GSSContext gssContext = null;
           try {
             GSSManager gssManager = GSSManager.getInstance();
@@ -361,11 +359,11 @@ public Void run() throws Exception {
           return null;
         }
       });
-    } catch (PrivilegedActionException ex) {
-      if (ex.getException() instanceof IOException) {
-        throw (IOException) ex.getException();
+    } catch (CompletionException ex) {
+      if (ex.getCause() instanceof IOException) {
+        throw (IOException) ex.getCause();
       } else {
-        throw new AuthenticationException(ex.getException());
+        throw new AuthenticationException(ex.getCause());
       }
     } catch (LoginException ex) {
       throw new AuthenticationException(ex);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 110d706008acd..2f43916695606 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -19,6 +19,7 @@
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.util.SubjectUtil;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSCredential;
@@ -43,6 +44,8 @@
 import java.util.HashSet;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionException;
 import java.util.regex.Pattern;

 /**
@@ -201,15 +204,20 @@ public void init(Properties config) throws ServletException {
       }

       try {
-        gssManager = Subject.doAs(serverSubject,
-            new PrivilegedExceptionAction<GSSManager>() {
+        gssManager = SubjectUtil.callAs(serverSubject,
+            new Callable<GSSManager>() {
               @Override
-              public GSSManager run() throws Exception {
+              public GSSManager call() throws Exception {
                 return GSSManager.getInstance();
               }
             });
-      } catch (PrivilegedActionException ex) {
-        throw ex.getException();
+      } catch (CompletionException ex) {
+        Throwable cause = ex.getCause();
+        if (cause instanceof Exception) {
+          throw (Exception) cause;
+        } else {
+          throw new RuntimeException(ex.getCause());
+        }
       }
     } catch (Exception ex) {
       throw new ServletException(ex);
@@ -334,19 +342,19 @@ public AuthenticationToken authenticate(HttpServletRequest request,
             "Invalid server principal " + serverPrincipal +
             "decoded from client request");
       }
-      token = Subject.doAs(serverSubject,
-          new PrivilegedExceptionAction<AuthenticationToken>() {
+      token = SubjectUtil.callAs(serverSubject,
+          new Callable<AuthenticationToken>() {
             @Override
-            public AuthenticationToken run() throws Exception {
+            public AuthenticationToken call() throws Exception {
               return runWithPrincipal(serverPrincipal, clientToken,
                   base64, response);
             }
           });
-    } catch (PrivilegedActionException ex) {
-      if (ex.getException() instanceof IOException) {
-        throw (IOException) ex.getException();
+    } catch (CompletionException ex) {
+      if (ex.getCause() instanceof IOException) {
+        throw (IOException) ex.getCause();
       } else {
-        throw new AuthenticationException(ex.getException());
+        throw new AuthenticationException(ex.getCause());
       }
     } catch (Exception ex) {
       throw new AuthenticationException(ex);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/SubjectUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/SubjectUtil.java
new file mode 100644
index 0000000000000..57219daa7f41f
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/SubjectUtil.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionException;
+
+import javax.security.auth.Subject;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private()
+public class SubjectUtil {
+  private static final MethodHandle CALL_AS = lookupCallAs();
+  private static final MethodHandle CURRENT = lookupCurrent();
+  private static boolean HAS_CALL_AS = true;
+
+  private SubjectUtil() {
+  }
+
+  private static MethodHandle lookupCallAs() {
+    MethodHandles.Lookup lookup = MethodHandles.lookup();
+    try {
+      try {
+        // Subject.doAs() is deprecated for removal and replaced by Subject.callAs().
+        // Look up the new API first: on Java versions where both exist (for example
+        // Java 18, 19 and 20), the new API delegates to the old one.
+        // Otherwise (Java 17), look up the old API.
+        return lookup.findStatic(Subject.class, "callAs",
+            MethodType.methodType(Object.class, Subject.class, Callable.class));
+      } catch (NoSuchMethodException x) {
+        HAS_CALL_AS = false;
+        try {
+          // Look up the old API.
+          MethodType oldSignature = MethodType.methodType(Object.class, Subject.class,
+              PrivilegedExceptionAction.class);
+          MethodHandle doAs = lookup.findStatic(Subject.class, "doAs", oldSignature);
+          // Convert the Callable used by the new API into the
+          // PrivilegedExceptionAction expected by the old API.
+          MethodType convertSignature = MethodType.methodType(PrivilegedExceptionAction.class,
+              Callable.class);
+          MethodHandle converter = lookup.findStatic(SubjectUtil.class,
+              "callableToPrivilegedExceptionAction", convertSignature);
+          return MethodHandles.filterArguments(doAs, 1, converter);
+        } catch (NoSuchMethodException e) {
+          throw new AssertionError(e);
+        }
+      }
+    } catch (IllegalAccessException e) {
+      throw new AssertionError(e);
+    }
+  }
+
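The fallback above splices the Callable-to-PrivilegedExceptionAction converter in front of `Subject.doAs` with `MethodHandles.filterArguments`. A minimal standalone sketch of that composition technique, for reviewers unfamiliar with it (the class and method names here are illustrative, not from the patch):

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class FilterArgumentsDemo {
  static int twice(int x) { return 2 * x; }

  public static void main(String[] args) throws Throwable {
    MethodHandles.Lookup lookup = MethodHandles.lookup();
    // Target handle: String.valueOf(int).
    MethodHandle valueOf = lookup.findStatic(String.class, "valueOf",
        MethodType.methodType(String.class, int.class));
    // Filter handle: pre-processes argument 0 before the target sees it,
    // the same way callableToPrivilegedExceptionAction pre-processes the
    // Callable (argument position 1) before Subject.doAs() is invoked.
    MethodHandle filter = lookup.findStatic(FilterArgumentsDemo.class, "twice",
        MethodType.methodType(int.class, int.class));
    MethodHandle composed = MethodHandles.filterArguments(valueOf, 0, filter);
    System.out.println(composed.invoke(21)); // prints 42
  }
}
```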
+  private static MethodHandle lookupCurrent() {
+    MethodHandles.Lookup lookup = MethodHandles.lookup();
+    try {
+      // Subject.getSubject(AccessControlContext) is deprecated for removal
+      // and replaced by Subject.current().
+      // Look up the new API first: on Java versions where both exist (for example
+      // Java 18, 19 and 20), the new API delegates to the old one.
+      // Otherwise (Java 17), look up the old API.
+      return lookup.findStatic(Subject.class, "current",
+          MethodType.methodType(Subject.class));
+    } catch (NoSuchMethodException e) {
+      MethodHandle getContext = lookupGetContext();
+      MethodHandle getSubject = lookupGetSubject();
+      return MethodHandles.filterReturnValue(getContext, getSubject);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  private static MethodHandle lookupGetSubject() {
+    MethodHandles.Lookup lookup = MethodHandles.lookup();
+    try {
+      Class<?> contextklass =
+          ClassLoader.getSystemClassLoader().loadClass("java.security.AccessControlContext");
+      return lookup.findStatic(Subject.class, "getSubject",
+          MethodType.methodType(Subject.class, contextklass));
+    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  private static MethodHandle lookupGetContext() {
+    try {
+      // Use reflection to work with Java versions that do and don't have
+      // AccessController.
+      Class<?> controllerKlass =
+          ClassLoader.getSystemClassLoader().loadClass("java.security.AccessController");
+      Class<?> contextklass =
+          ClassLoader.getSystemClassLoader().loadClass("java.security.AccessControlContext");
+
+      MethodHandles.Lookup lookup = MethodHandles.lookup();
+      return lookup.findStatic(controllerKlass, "getContext",
+          MethodType.methodType(contextklass));
+    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  /**
+   * Maps to Subject.callAs() if available, otherwise maps to Subject.doAs().
+   * On JVMs without callAs(), the Callable is adapted to a
+   * PrivilegedExceptionAction so behavior matches across Java versions.
+   *
+   * @param subject the subject this action runs as
+   * @param action the action to run
+   * @return the result of the action
+   * @param <T> the type of the result
+   * @throws CompletionException if the action completes with a checked exception
+   */
+  @SuppressWarnings("unchecked")
+  public static <T> T callAs(Subject subject, Callable<T> action) throws CompletionException {
+    try {
+      return (T) CALL_AS.invoke(subject, action);
+    } catch (PrivilegedActionException e) {
+      throw new CompletionException(e.getCause());
+    } catch (Throwable t) {
+      throw sneakyThrow(t);
+    }
+  }
+
+  /**
+   * Maps action to a Callable, and delegates to callAs(). On older JVMs, the
+   * action may be double wrapped (into a Callable, and then back into a
+   * PrivilegedExceptionAction).
+   *
+   * @param subject the subject this action runs as
+   * @param action the action to run
+   * @param <T> the type of the result
+   * @return the result of the action
+   */
+  public static <T> T doAs(Subject subject, PrivilegedAction<T> action) {
+    return callAs(subject, privilegedActionToCallable(action));
+  }
+
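For callers, the practical difference is the exception envelope: checked exceptions escape callAs() as the cause of an unchecked CompletionException rather than a PrivilegedActionException. A hedged migration sketch (the fetch method and its body are illustrative, not part of the patch):

```java
import java.io.IOException;
import java.util.concurrent.CompletionException;
import javax.security.auth.Subject;
import org.apache.hadoop.util.SubjectUtil;

public class CallAsMigration {
  static byte[] fetch(Subject subject) throws IOException {
    try {
      return SubjectUtil.callAs(subject, () -> {
        // ... privileged work that may throw IOException ...
        return new byte[0];
      });
    } catch (CompletionException e) {
      // Unwrap, mirroring what KerberosAuthenticator does in this patch.
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      }
      throw e;
    }
  }
}
```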
+  /**
+   * Maps action to a Callable, and delegates to callAs(). On older JVMs, the
+   * action may be double wrapped (into a Callable, and then back into a
+   * PrivilegedExceptionAction).
+   *
+   * @param subject the subject this action runs as
+   * @param action the action to run
+   * @param <T> the type of the result
+   * @return the result of the action
+   * @throws PrivilegedActionException if the action completes with a checked exception
+   */
+  public static <T> T doAs(Subject subject, PrivilegedExceptionAction<T> action)
+      throws PrivilegedActionException {
+    try {
+      return callAs(subject, privilegedExceptionActionToCallable(action));
+    } catch (CompletionException ce) {
+      try {
+        Exception cause = (Exception) (ce.getCause());
+        throw new PrivilegedActionException(cause);
+      } catch (ClassCastException castException) {
+        // This should never happen, as PrivilegedExceptionAction should not wrap
+        // non-checked exceptions.
+        throw new PrivilegedActionException(new UndeclaredThrowableException(ce.getCause()));
+      }
+    }
+  }
+
+  /**
+   * Maps to Subject.current() if available, otherwise maps to
+   * Subject.getSubject().
+   *
+   * @return the current subject
+   */
+  public static Subject current() {
+    try {
+      return (Subject) CURRENT.invoke();
+    } catch (Throwable t) {
+      throw sneakyThrow(t);
+    }
+  }
+
+  /**
+   * Wraps a Runnable in callAs() to preserve pre-JEP 486 behaviour.
+   *
+   * Note that this snapshots the subject at the time wrap() is called.
+   * If the subject is changed between calling this and starting the thread,
+   * the change is ignored.
+   *
+   * @param r Runnable
+   * @return r wrapped in callAs()
+   */
+  public static Runnable wrap(Runnable r) {
+    if (!HAS_CALL_AS) {
+      return r;
+    }
+    Subject s = current();
+    return () -> callAs(s, () -> {
+      r.run();
+      return null;
+    });
+  }
+
+  /**
+   * Wraps a Callable in callAs() to preserve pre-JEP 486 behaviour.
+   *
+   * Note that this snapshots the subject at the time wrap() is called.
+   * If the subject is changed between calling this and starting the thread,
+   * the change is ignored.
+   *
+   * @param <T> the return type of the Callable
+   * @param c Callable
+   * @return c wrapped in callAs()
+   */
+  public static <T> Callable<T> wrap(Callable<T> c) {
+    if (!HAS_CALL_AS) {
+      return c;
+    }
+    Subject s = current();
+    return () -> callAs(s, () -> c.call());
+  }
+
+  @SuppressWarnings("unused")
+  private static <T> PrivilegedExceptionAction<T> callableToPrivilegedExceptionAction(
+      Callable<T> callable) {
+    return callable::call;
+  }
+
+  private static <T> Callable<T> privilegedExceptionActionToCallable(
+      PrivilegedExceptionAction<T> action) {
+    return action::run;
+  }
+
+  private static <T> Callable<T> privilegedActionToCallable(PrivilegedAction<T> action) {
+    return action::run;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <E extends Throwable> RuntimeException sneakyThrow(Throwable e) throws E {
+    throw (E) e;
+  }
+}
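The wrap() helpers matter for thread hand-off. Before JEP 486, a Subject travelled implicitly with the AccessControlContext that new threads inherited; with the ScopedValue-based Subject.callAs() it does not cross thread boundaries on its own. A hedged sketch of the intended use (the worker Runnable is illustrative):

```java
import javax.security.auth.Subject;
import org.apache.hadoop.util.SubjectUtil;

public class WrapExample {
  public static void main(String[] args) {
    // Capture the Subject bound to the current thread (may be null here).
    Runnable task = SubjectUtil.wrap(() ->
        System.out.println("worker sees: " + SubjectUtil.current()));
    // On Java versions with Subject.callAs(), the wrapper re-binds the
    // captured Subject inside the new thread; on older versions wrap()
    // returns the Runnable unchanged because inheritance is automatic.
    new Thread(task).start();
  }
}
```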
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
index 293871bcd0620..05cc4e1ebc481 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
@@ -20,17 +20,17 @@
 import javax.security.auth.login.LoginContext;

 import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.util.SubjectUtil;

 import java.io.File;
 import java.security.Principal;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
 import java.util.UUID;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionException;

 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;

@@ -105,7 +105,13 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
     }
   }

+  @Deprecated
   public static <T> T doAs(String principal, final Callable<T> callable) throws Exception {
+    return callAs(principal, callable);
+  }
+
+  public static <T> T callAs(String principal, final Callable<T> callable) throws Exception {
     LoginContext loginContext = null;
     try {
       Set<Principal> principals = new HashSet<>();
@@ -114,14 +120,19 @@ public static <T> T doAs(String principal, final Callable<T> callable) throws Exception {
       loginContext = new LoginContext("", subject, null, new KerberosConfiguration(principal));
       loginContext.login();
       subject = loginContext.getSubject();
-      return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
+      return SubjectUtil.callAs(subject, new Callable<T>() {
         @Override
-        public T run() throws Exception {
+        public T call() throws Exception {
           return callable.call();
         }
       });
-    } catch (PrivilegedActionException ex) {
-      throw ex.getException();
+    } catch (CompletionException ex) {
+      Throwable cause = ex.getCause();
+      if (cause instanceof Exception) {
+        throw (Exception) cause;
+      } else {
+        throw new RuntimeException(cause);
+      }
     } finally {
       if (loginContext != null) {
         loginContext.logout();
@@ -129,10 +140,20 @@ public T run() throws Exception {
     }
   }

+  public static <T> T callAsClient(Callable<T> callable) throws Exception {
+    return callAs(getClientPrincipal(), callable);
+  }
+
+  @Deprecated
   public static <T> T doAsClient(Callable<T> callable) throws Exception {
     return doAs(getClientPrincipal(), callable);
   }

+  public static <T> T callAsServer(Callable<T> callable) throws Exception {
+    return callAs(getServerPrincipal(), callable);
+  }
+
+  @Deprecated
   public static <T> T doAsServer(Callable<T> callable) throws Exception {
     return doAs(getServerPrincipal(), callable);
   }
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
index 47697d0a6791c..fb3ce69a052ee 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
@@ -137,7 +137,7 @@ public void testAuthentication() throws Exception {
     final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
getAuthenticationHandlerConfiguration()); - KerberosTestUtils.doAsClient(new Callable() { + KerberosTestUtils.callAsClient(new Callable() { @Override public Void call() throws Exception { auth._testAuthenticationHttpClient(new KerberosAuthenticator(), false); @@ -182,7 +182,7 @@ public void testAuthenticationHttpClientPost() throws Exception { final AuthenticatorTestCase auth = new AuthenticatorTestCase(); AuthenticatorTestCase.setAuthenticationHandlerConfig( getAuthenticationHandlerConfiguration()); - KerberosTestUtils.doAsClient(new Callable() { + KerberosTestUtils.callAsClient(new Callable() { @Override public Void call() throws Exception { auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true); @@ -217,7 +217,7 @@ public void testAuthenticationWithMultiAuthHandler() throws Exception { final AuthenticatorTestCase auth = new AuthenticatorTestCase(); AuthenticatorTestCase .setAuthenticationHandlerConfig(getMultiAuthHandlerConfiguration()); - KerberosTestUtils.doAsClient(new Callable() { + KerberosTestUtils.callAsClient(new Callable() { @Override public Void call() throws Exception { auth._testAuthentication(new KerberosAuthenticator(), false); @@ -233,7 +233,7 @@ public void testAuthenticationHttpClientPostWithMultiAuthHandler() final AuthenticatorTestCase auth = new AuthenticatorTestCase(); AuthenticatorTestCase .setAuthenticationHandlerConfig(getMultiAuthHandlerConfiguration()); - KerberosTestUtils.doAsClient(new Callable() { + KerberosTestUtils.callAsClient(new Callable() { @Override public Void call() throws Exception { auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true); diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index 03da289b81b98..633f6a8fd8619 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -295,7 +295,7 @@ public void testRequestWithIncompleteAuthorization() { @Test public void testRequestWithAuthorization() throws Exception { - String token = KerberosTestUtils.doAsClient(new Callable() { + String token = KerberosTestUtils.callAsClient(new Callable() { @Override public String call() throws Exception { GSSManager gssManager = GSSManager.getInstance(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 1c451ca6d30b9..5d7077668e0f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -22,6 +22,7 @@ import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,15 +106,16 @@ public Collection getChangedProperties( /** * A background thread to apply configuration changes. 
*/ - private static class ReconfigurationThread extends Thread { + private static class ReconfigurationThread extends HadoopThread { private ReconfigurableBase parent; ReconfigurationThread(ReconfigurableBase base) { + super(); this.parent = base; } // See {@link ReconfigurationServlet#applyChanges} - public void run() { + public void work() { LOG.info("Starting reconfiguration task."); final Configuration oldConf = parent.getConf(); final Configuration newConf = parent.getNewConf(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index a3293620ab9e4..d25fc1a562b15 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -74,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.Queue; +import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -504,10 +505,10 @@ private HttpURLConnection createConnection(final URL url, String method) HttpURLConnection conn; try { final String doAsUser = getDoAsUser(); - conn = getActualUgi().doAs(new PrivilegedExceptionAction + conn = getActualUgi().callAs(new Callable () { @Override - public HttpURLConnection run() throws Exception { + public HttpURLConnection call() throws Exception { DelegationTokenAuthenticatedURL authUrl = createAuthenticatedURL(); return authUrl.openConnection(url, authToken, doAsUser); @@ -1026,9 +1027,9 @@ public Token getDelegationToken(final String renewer) throws IOException { Token token = null; try { final String doAsUser = getDoAsUser(); - token = getActualUgi().doAs(new PrivilegedExceptionAction>() { + token = getActualUgi().callAs(new Callable>() { @Override - public Token run() throws Exception { + public Token call() throws Exception { // Not using the cached token here.. Creating a new token here // everytime. 
LOG.debug("Getting new token from {}, renewer:{}", url, renewer); @@ -1065,10 +1066,10 @@ public long renewDelegationToken(final Token dToken) throws IOException { token, url, doAsUser); final DelegationTokenAuthenticatedURL authUrl = createAuthenticatedURL(); - return getActualUgi().doAs( - new PrivilegedExceptionAction() { + return getActualUgi().callAs( + new Callable() { @Override - public Long run() throws Exception { + public Long call() throws Exception { return authUrl.renewDelegationToken(url, token, doAsUser); } } @@ -1088,10 +1089,10 @@ public Void cancelDelegationToken(final Token dToken) throws IOException { final String doAsUser = getDoAsUser(); final DelegationTokenAuthenticatedURL.Token token = generateDelegationToken(dToken); - return getActualUgi().doAs( - new PrivilegedExceptionAction() { + return getActualUgi().callAs( + new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { final URL url = createURL(null, null, null, null); LOG.debug("Cancelling delegation token {} with url:{}, as:{}", dToken, url, doAsUser); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java index d7b61346d4e3b..b354314952e0f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,7 +108,7 @@ void init() { */ private void initRefreshThread(boolean runImmediately) { if (refreshInterval > 0) { - refreshUsed = new Thread(new RefreshThread(this, runImmediately), + refreshUsed = new HadoopThread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 794855508c63f..20eed047b5136 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +39,7 @@ */ @InterfaceAudience.Private public class DelegationTokenRenewer - extends Thread { + extends HadoopThread { private static final Logger LOG = LoggerFactory .getLogger(DelegationTokenRenewer.class); @@ -263,7 +264,7 @@ public void removeRenewAction( } @Override - public void run() { + public void work() { for(;;) { RenewAction action = null; try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index a903e337de140..575ddad059d5a 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -22,7 +22,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -35,6 +34,7 @@ import java.util.Stack; import java.util.TreeSet; import java.util.Map.Entry; +import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import javax.annotation.Nonnull; @@ -340,9 +340,9 @@ private static AbstractFileSystem getAbstractFileSystem( UserGroupInformation user, final URI uri, final Configuration conf) throws UnsupportedFileSystemException, IOException { try { - return user.doAs(new PrivilegedExceptionAction() { + return user.callAs(new Callable() { @Override - public AbstractFileSystem run() throws UnsupportedFileSystemException { + public AbstractFileSystem call() throws UnsupportedFileSystemException { return AbstractFileSystem.get(uri, conf); } }); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 930abf0b5d172..006d9f543858f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -25,7 +25,6 @@ import java.lang.ref.ReferenceQueue; import java.net.URI; import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -43,6 +42,7 @@ import java.util.Set; import java.util.Stack; import java.util.TreeSet; +import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; @@ -81,6 +81,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.util.Preconditions; @@ -271,9 +272,9 @@ public static FileSystem get(final URI uri, final Configuration conf, conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH); UserGroupInformation ugi = UserGroupInformation.getBestUGI(ticketCachePath, user); - return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public FileSystem run() throws IOException { + public FileSystem call() throws IOException { return get(uri, conf); } }); @@ -574,9 +575,9 @@ public static FileSystem newInstance(final URI uri, final Configuration conf, conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH); UserGroupInformation ugi = UserGroupInformation.getBestUGI(ticketCachePath, user); - return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public FileSystem run() throws IOException { + public FileSystem call() throws IOException { return newInstance(uri, conf); } }); @@ -4087,7 +4088,7 @@ private interface StatisticsAggregator { static { STATS_DATA_REF_QUEUE = new ReferenceQueue<>(); // start a single daemon cleaner thread - STATS_DATA_CLEANER = new Thread(new 
StatisticsDataReferenceCleaner()); + STATS_DATA_CLEANER = new HadoopThread(new StatisticsDataReferenceCleaner()); STATS_DATA_CLEANER. setName(StatisticsDataReferenceCleaner.class.getName()); STATS_DATA_CLEANER.setDaemon(true); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 44d203c684cd1..477d33c9ba7aa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -28,6 +28,7 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.Function; @@ -35,7 +36,6 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -328,9 +328,9 @@ protected Function initAndGetTargetFs() { public FileSystem apply(final URI uri) { FileSystem fs; try { - fs = ugi.doAs(new PrivilegedExceptionAction() { + fs = ugi.callAs(new Callable() { @Override - public FileSystem run() throws IOException { + public FileSystem call() throws IOException { if (enableInnerCache) { synchronized (cache) { return cache.get(uri, config); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 3d85015bea47e..f4673bbe007f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.EnumSet; import java.util.HashSet; @@ -35,6 +34,7 @@ import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.Callable; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; @@ -247,10 +247,10 @@ protected Function initAndGetTargetFs() { public AbstractFileSystem apply(final URI uri) { AbstractFileSystem fs; try { - fs = ugi.doAs( - new PrivilegedExceptionAction() { + fs = ugi.callAs( + new Callable() { @Override - public AbstractFileSystem run() throws IOException { + public AbstractFileSystem call() throws IOException { return AbstractFileSystem.createFileSystem(uri, config); } }); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java index d222d52e37349..d37c321ff4311 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java @@ -283,7 +283,7 @@ public void uncaughtException(Thread t, Throwable e) { } @Override - public void run() { + public void work() { 
while (shouldRun) { try { loopUntilConnected(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java index 12a24fd079e62..f117cbcfbfa20 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ha; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import java.io.BufferedReader; @@ -50,7 +51,7 @@ enum StreamType { this.stream = stream; this.type = type; - thread = new Thread(new Runnable() { + thread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 1f98e55bcd9fa..dcda905ad4a9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -19,10 +19,9 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -179,9 +178,9 @@ public int run(final String[] args) throws Exception { } loginAsFCUser(); try { - return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction() { + return SecurityUtil.callAsLoginUserOrFatalNoException(new Callable() { @Override - public Integer run() { + public Integer call() { try { return doRun(args); } catch (Exception t) { @@ -578,9 +577,9 @@ private void doFence(HAServiceTarget target) { void cedeActive(final int millisToCede) throws AccessControlException, ServiceFailedException, IOException { try { - UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction() { + UserGroupInformation.getLoginUser().callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { doCedeActive(millisToCede); return null; } @@ -632,9 +631,9 @@ private void doCedeActive(int millisToCede) */ void gracefulFailoverToYou() throws ServiceFailedException, IOException { try { - UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction() { + UserGroupInformation.getLoginUser().callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { doGracefulFailover(); return null; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java index 60210ccd920c2..010f4928be9ff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java @@ -158,7 +158,7 @@ void tryStart() { if (running.compareAndSet(null, current)) { final Daemon daemon = new Daemon() { @Override - public void run() { + public void work() { for (; 
isRunning(this);) { final long waitTime = checkCalls(); tryStop(this); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 5caf27edcdaf2..7e432c5041ebb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -54,6 +54,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.Tracer; import org.slf4j.Logger; @@ -65,7 +66,6 @@ import java.io.*; import java.net.*; import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; import java.util.*; import java.util.Map.Entry; import java.util.concurrent.*; @@ -407,7 +407,7 @@ public synchronized void setRpcResponse(Writable rpcResponse) { /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. Calls are multiplexed through this * socket: responses may be delivered out of order. */ - private class Connection extends Thread { + private class Connection extends HadoopThread { private InetSocketAddress server; // server ip:port private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method @@ -448,7 +448,7 @@ private class Connection extends Thread { Consumer removeMethod) { this.remoteId = remoteId; this.server = remoteId.getAddress(); - this.rpcRequestThread = new Thread(new RpcRequestSender(), + this.rpcRequestThread = new HadoopThread(new RpcRequestSender(), "IPC Parameter Sending Thread for " + remoteId); this.rpcRequestThread.setDaemon(true); @@ -752,9 +752,9 @@ private synchronized void handleSaslConnectionFailure( final int currRetries, final int maxRetries, final IOException ex, final Random rand, final UserGroupInformation ugi) throws IOException, InterruptedException { - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Object run() throws IOException, InterruptedException { + public Object call() throws IOException, InterruptedException { final short MAX_BACKOFF = 5000; closeConnection(); disposeSasl(); @@ -838,9 +838,9 @@ private synchronized void setupIOstreams( if (authProtocol == AuthProtocol.SASL) { try { authMethod = ticket - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public AuthMethod run() + public AuthMethod call() throws IOException, InterruptedException { return setupSaslConnection(ipcStreams); } @@ -1126,7 +1126,7 @@ private synchronized void sendPing() throws IOException { } @Override - public void run() { + public void work() { try { // Don't start the ipc parameter sending thread until we start this // thread, because the shutdown logic only gets triggered if this diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java index 39e55348c81e8..78c7df120c4f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ipc; import java.io.IOException; 
-import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,12 +28,12 @@ import org.apache.hadoop.security.UserGroupInformation; public abstract class ExternalCall extends Call { - private final PrivilegedExceptionAction action; + private final Callable action; private final AtomicBoolean done = new AtomicBoolean(); private T result; private Throwable error; - public ExternalCall(PrivilegedExceptionAction action) { + public ExternalCall(Callable action) { this.action = action; } @@ -73,9 +73,9 @@ boolean isDone() { // invoked by ipc handler @Override - public final Void run() throws IOException { + public final Void call() throws IOException { try { - result = action.run(); + result = action.call(); sendResponse(); } catch (Throwable t) { abortResponse(t); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index ca7460a653c9a..2d8343ed857da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -49,7 +49,6 @@ import java.nio.channels.SocketChannel; import java.nio.channels.WritableByteChannel; import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -63,6 +62,7 @@ import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; @@ -124,6 +124,8 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; + import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.SpanContext; @@ -990,7 +992,7 @@ static boolean getClientBackoffEnable( /** A generic call queued for handling. */ public static class Call implements Schedulable, - PrivilegedExceptionAction { + Callable { private final ProcessingDetails processingDetails = new ProcessingDetails(TimeUnit.NANOSECONDS); // the method name to use in metrics @@ -1091,7 +1093,7 @@ public String toString() { } @Override - public Void run() throws Exception { + public Void call() throws Exception { return null; } // should eventually be abstract but need to avoid breaking tests @@ -1295,7 +1297,7 @@ public int getRemotePort() { } @Override - public Void run() throws Exception { + public Void call() throws Exception { if (!connection.channel.isOpen()) { Server.LOG.info(Thread.currentThread().getName() + ": skipped " + this); return null; @@ -1307,7 +1309,7 @@ public Void run() throws Exception { ResponseParams responseParams = new ResponseParams(); try { - value = call( + value = Server.this.call( rpcKind, connection.protocolName, rpcRequest, getTimestampNanos()); } catch (Throwable e) { populateResponseParamsOnError(e, responseParams); @@ -1471,7 +1473,7 @@ public String toString() { } /** Listens on the socket. 
Creates jobs for the handler threads*/ - private class Listener extends Thread { + private class Listener extends HadoopThread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server @@ -1520,7 +1522,7 @@ void setIsAuxiliary() { this.isOnAuxiliaryPort = true; } - private class Reader extends Thread { + private class Reader extends HadoopThread { final private BlockingQueue pendingConnections; private final Selector readSelector; @@ -1533,7 +1535,7 @@ private class Reader extends Thread { } @Override - public void run() { + public void work() { LOG.info("Starting " + Thread.currentThread().getName()); try { doRunLoop(); @@ -1612,7 +1614,7 @@ void shutdown() { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); connectionManager.startIdleScan(); @@ -1760,7 +1762,7 @@ Reader getReader() { } // Sends responses of RPC back to clients. - private class Responder extends Thread { + private class Responder extends HadoopThread { private final Selector writeSelector; private int pending; // connections waiting to register @@ -1772,7 +1774,7 @@ private class Responder extends Thread { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); try { @@ -3219,7 +3221,7 @@ private void internalQueueCall(Call call, boolean blocking) } /** Handles queued calls . */ - private class Handler extends Thread { + private class Handler extends HadoopThread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + @@ -3227,7 +3229,7 @@ public Handler(int instanceNumber) { } @Override - public void run() { + public void work() { LOG.debug("{}: starting", Thread.currentThread().getName()); SERVER.set(Server.this); while (running) { @@ -3274,9 +3276,9 @@ public void run() { UserGroupInformation remoteUser = call.getRemoteUser(); connDropped = !call.isOpen(); if (remoteUser != null) { - remoteUser.doAs(call); + remoteUser.callAs(call); } else { - call.run(); + call.call(); } } catch (InterruptedException e) { if (running) { // unexpected -- log it diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index c8843f2812e57..60ebc96d1e9f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final MetricsSink sink; private final MetricsFilter sourceFilter, recordFilter, metricFilter; private final SinkQueue queue; - private final Thread sinkThread; + private final HadoopThread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; private final int periodMs, firstRetryDelay, retryCount; @@ -84,8 +85,8 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { "Dropped updates per 
sink", 0); qsize = registry.newGauge("Sink_"+ name + "Qsize", "Queue size", 0); - sinkThread = new Thread() { - @Override public void run() { + sinkThread = new HadoopThread() { + @Override public void work() { publishMetricsFromQueue(); } }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index 5c8a3357a3ee6..f867370add712 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -440,7 +440,7 @@ private void sendCallbackAndRemove(String caller, } @VisibleForTesting - final Thread watcherThread = new Thread(new Runnable() { + final Thread watcherThread = new HadoopThread(new Runnable() { @Override public void run() { if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index bb306836dc15e..eb20cc260345a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -29,6 +29,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.function.Supplier; import javax.security.auth.callback.Callback; @@ -153,10 +154,10 @@ public SaslServer create(final Connection connection, final SaslServer saslServer; if (ugi != null) { - saslServer = ugi.doAs( - new PrivilegedExceptionAction() { + saslServer = ugi.callAs( + new Callable() { @Override - public SaslServer run() throws SaslException { + public SaslServer call() throws SaslException { return saslFactory.createSaslServer(mechanism, protocol, serverId, saslProperties, callback); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 1b6db31e7eb1d..2766352bedef4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY; import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URI; @@ -32,6 +33,7 @@ import java.util.Collections; import java.util.List; import java.util.ServiceLoader; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; @@ -512,7 +514,9 @@ public static Text buildTokenService(URI uri) { * @param action action. * @param generic type T. 
  * @return generic type T.
+ * @deprecated Use {@link #callAsLoginUserOrFatalNoException(Callable)} instead
  */
+ @Deprecated
 public static <T> T doAsLoginUserOrFatal(PrivilegedAction<T> action) {
   if (UserGroupInformation.isSecurityEnabled()) {
     UserGroupInformation ugi = null;
     try {
@@ -528,6 +532,62 @@ public static <T> T doAsLoginUserOrFatal(PrivilegedAction<T> action) {
       return action.run();
     }
   }
+
+  /**
+   * Perform the given action as the daemon's login user. If the login
+   * user cannot be determined, this will log a FATAL error and exit
+   * the whole JVM.
+   *
+   * This is the replacement for doAsLoginUserOrFatal(). Since
+   * doAsLoginUserOrFatal() does not throw checked exceptions, this method
+   * likewise expects a Callable that does not throw checked exceptions.
+   *
+   * @param action action.
+   * @param <T> generic type T.
+   * @return generic type T.
+   */
+  public static <T> T callAsLoginUserOrFatalNoException(Callable<T> action) {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      UserGroupInformation ugi = null;
+      try {
+        ugi = UserGroupInformation.getLoginUser();
+      } catch (IOException e) {
+        LOG.error("Exception while getting login user", e);
+        e.printStackTrace();
+        Runtime.getRuntime().exit(-1);
+      }
+      return ugi.callAsNoException(action);
+    } else {
+      try {
+        return action.call();
+      } catch (Exception e) {
+        if (e instanceof RuntimeException) {
+          throw (RuntimeException) e;
+        } else {
+          throw new UndeclaredThrowableException(e);
+        }
+      }
+    }
+  }
+
+  /**
+   * Perform the given action as the daemon's login user. If an
+   * InterruptedException is thrown, it is converted to an IOException.
+   *
+   * @param action the action to perform
+   * @param <T> Generics Type T.
+   * @return the result of the action
+   * @throws IOException in the event of error
+   */
+  public static <T> T callAsLoginUser(Callable<T> action)
+      throws IOException {
+    try {
+      return UserGroupInformation.getLoginUser().callAs(action);
+    } catch (InterruptedException ie) {
+      throw new IOException(ie);
+    }
+  }

 /**
  * Perform the given action as the daemon's login user. If an
  * InterruptedException is thrown, it is converted to an IOException.
  *
  * @param action the action to perform
  * @param <T> Generics Type T.
  * @return the result of the action
  * @throws IOException in the event of error
+ * @deprecated Use {@link #callAsLoginUser(Callable)} instead
  */
+ @Deprecated
 public static <T> T doAsLoginUser(PrivilegedExceptionAction<T> action)
     throws IOException {
   return doAsUser(UserGroupInformation.getLoginUser(), action);
@@ -552,11 +614,32 @@ public static <T> T doAsLoginUser(PrivilegedExceptionAction<T> action)
   * @return the result of the action
   * @throws IOException in the event of error
   */
+  public static <T> T callAsCurrentUser(Callable<T> action)
+      throws IOException {
+    try {
+      return UserGroupInformation.getCurrentUser().callAs(action);
+    } catch (InterruptedException ie) {
+      throw new IOException(ie);
+    }
+  }
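These Callable-based helpers remove the PrivilegedExceptionAction boilerplate at call sites. A hedged before/after sketch (the FileSystem lookup is an illustrative action, not taken from this patch):

```java
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.SecurityUtil;

public class SecurityUtilMigration {
  static FileSystem oldStyle(Configuration conf) throws IOException {
    // Deprecated path: anonymous PrivilegedExceptionAction.
    return SecurityUtil.doAsCurrentUser(
        new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });
  }

  static FileSystem newStyle(Configuration conf) throws IOException {
    // Same semantics; InterruptedException is still converted to IOException.
    return SecurityUtil.callAsCurrentUser(() -> FileSystem.get(conf));
  }
}
```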
+ * + * @param action the action to perform + * @param generic type T. + * @return the result of the action + * @throws IOException in the event of error + * @deprecated Use {@link #callAsCurrentUser(Callable)} instead + */ + @Deprecated public static T doAsCurrentUser(PrivilegedExceptionAction action) throws IOException { return doAsUser(UserGroupInformation.getCurrentUser(), action); } + @Deprecated private static T doAsUser(UserGroupInformation ugi, PrivilegedExceptionAction action) throws IOException { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 6525460d56180..8ab013d2e10f6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -33,8 +33,6 @@ import java.io.File; import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; -import java.security.AccessControlContext; -import java.security.AccessController; import java.security.Principal; import java.security.PrivilegedAction; import java.security.PrivilegedActionException; @@ -50,6 +48,8 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; @@ -89,8 +89,9 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.SubjectUtil; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -585,8 +586,7 @@ public boolean hasKerberosCredentials() { @InterfaceStability.Evolving public static UserGroupInformation getCurrentUser() throws IOException { ensureInitialized(); - AccessControlContext context = AccessController.getContext(); - Subject subject = Subject.getSubject(context); + Subject subject = SubjectUtil.current(); if (subject == null || subject.getPrincipals(User.class).isEmpty()) { return getLoginUser(); } else { @@ -932,7 +932,7 @@ private void executeAutoRenewalTask(final String userName, new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setDaemon(true); t.setName("TGT Renewer for " + userName); return t; @@ -1926,19 +1926,97 @@ protected Subject getSubject() { return subject; } + /** + * Run the given action as the user.
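+ *
+ * A minimal usage sketch (hypothetical {@code ugi} instance):
+ * <pre>
+ * String name = ugi.callAs(() -> UserGroupInformation.getCurrentUser().getUserName());
+ * </pre>
+ *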
+ * @param the return type of the run method + * @param action the Callable to execute + * @return the value from the Callable's call method + * @throws IOException if the action throws an IOException + * @throws Error if the action throws an Error + * @throws RuntimeException if the action throws a RuntimeException + * @throws InterruptedException if the action throws an InterruptedException + * @throws UndeclaredThrowableException if the action throws something else + */ + @InterfaceAudience.Public + @InterfaceStability.Evolving + public T callAs(Callable action) throws IOException, InterruptedException { + tracePrivilegedAction(action); + try { + return SubjectUtil.callAs(subject, action); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + LOG.debug("CompletionException as: {}", this, cause); + if (cause == null) { + throw new RuntimeException("CompletionException with no " + + "underlying cause. UGI [" + this + "]: " + ce, ce); + } else if (cause instanceof IOException) { + throw (IOException) cause; + } else if (cause instanceof Error) { + throw (Error) cause; + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else if (cause instanceof InterruptedException) { + throw (InterruptedException) cause; + } else { + throw new UndeclaredThrowableException(cause); + } + } + } + + /** + * Run the given action as the user. + * + * This variant should only be used with Callables that do not throw checked exceptions. + * + * This method is meant to simplify migrating from doAs(PrivilegedAction action). + * + * @param the return type of the run method + * @param action the Callable to execute + * @return the value from the Callable's call method + * @throws Error if the action throws an Error + * @throws RuntimeException if the action throws a RuntimeException + * @throws UndeclaredThrowableException if the action throws a checked exception + */ + @InterfaceAudience.Public + @InterfaceStability.Evolving + public T callAsNoException(Callable action) { + tracePrivilegedAction(action); + try { + return SubjectUtil.callAs(subject, action); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + LOG.debug("CompletionException as: {}", this, cause); + if (cause == null) { + throw new RuntimeException("CompletionException with no " + + "underlying cause. UGI [" + this + "]: " + ce, ce); + } else if (cause instanceof Error) { + throw (Error) cause; + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new UndeclaredThrowableException(cause); + } + } + } + /** * Run the given action as the user. * @param the return type of the run method * @param action the method to execute * @return the value from the run method + * @deprecated Use {@link #callAs(Callable)} or {@link #callAsNoException(Callable)} instead. */ @InterfaceAudience.Public @InterfaceStability.Evolving + @Deprecated public T doAs(PrivilegedAction action) { tracePrivilegedAction(action); - return Subject.doAs(subject, action); + return SubjectUtil.doAs(subject, action); } - + + /** * Run the given action as the user, potentially throwing an exception.
* @param the return type of the run method @@ -1949,14 +2027,16 @@ public T doAs(PrivilegedAction action) { * @throws RuntimeException if the action throws a RuntimeException * @throws InterruptedException if the action throws an InterruptedException * @throws UndeclaredThrowableException if the action throws something else + * @deprecated Use {@link #callAs(Callable)} instead. */ @InterfaceAudience.Public @InterfaceStability.Evolving + @Deprecated public T doAs(PrivilegedExceptionAction action ) throws IOException, InterruptedException { try { tracePrivilegedAction(action); - return Subject.doAs(subject, action); + return SubjectUtil.doAs(subject, action); } catch (PrivilegedActionException pae) { Throwable cause = pae.getCause(); LOG.debug("PrivilegedActionException as: {}", this, cause); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 9cf3ccdd445e7..960f6aaf12f40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.functional.InvocationRaisingIOE; import org.slf4j.Logger; @@ -912,12 +912,12 @@ public boolean isRunning() { return running; } - private class ExpiredTokenRemover extends Thread { + private class ExpiredTokenRemover extends HadoopThread { private long lastMasterKeyUpdate; private long lastTokenCacheCleanup; @Override - public void run() { + public void work() { LOG.info("Starting expired delegation token remover thread, " + "tokenRemoverScanInterval=" + tokenRemoverScanInterval / (60 * 1000) + " min(s)"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index 4d43c3a106f5e..b9f11152203be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,7 +117,7 @@ public void interrupted(IrqHandler.InterruptData interruptData) { //start an async shutdown thread with a timeout ServiceForcedShutdown shutdown = new ServiceForcedShutdown(service, shutdownTimeMillis); - Thread thread = new Thread(shutdown); + Thread thread = new HadoopThread(shutdown); thread.setDaemon(true); thread.setName("Service Forced Shutdown"); thread.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java index a3bf4faf0a980..5b877618ef5e0
100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +75,7 @@ public AsyncDiskService(String[] volumes) { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new Thread(threadGroup, r); + return new HadoopThread(threadGroup, r); } }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index 5c90e4bd2d601..e1d01f82c919b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -29,6 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * This ExecutorService blocks the submission of new tasks when its queue is @@ -71,7 +72,7 @@ static ThreadFactory getNamedThreadFactory(final String prefix) { public Thread newThread(Runnable r) { final String name = prefix + "-pool" + poolNum + "-t" + threadNumber.getAndIncrement(); - return new Thread(group, r, name); + return new HadoopThread(group, r, name); } }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java index f735b82e4289b..670c63f91b3e6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java @@ -18,16 +18,70 @@ package org.apache.hadoop.util; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; import java.util.concurrent.ThreadFactory; +import javax.security.auth.Subject; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -/** A thread that has called {@link Thread#setDaemon(boolean) } with true.*/ +/** A thread that has called {@link Thread#setDaemon(boolean) } with true. + * + * The code to run must either be supplied as the runnable parameter or + * by overriding the work() method. + * + * Subject propagation is handled automatically in either case.
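+ *
+ * A minimal usage sketch (hypothetical work body):
+ * <pre>
+ * Daemon d = new Daemon() {
+ *   public void work() {
+ *     // runs with the Subject that was current when start() was called
+ *   }
+ * };
+ * d.start();
+ * </pre>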
+ */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public class Daemon extends Thread { + Subject startSubject; + + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + + /** + * Override this instead of run(). + */ + public void work() { + throw new IllegalArgumentException("No Runnable was specified and work() is not overridden"); + } + + @Override + public final void run() { + try { + SubjectUtil.callAs(startSubject, new Callable() { + + @Override + public Void call() throws Exception { + if (runnable != null) { + runnable.run(); + } else { + work(); + } + return null; + } + + }); + } catch (CompletionException ce) { + Throwable t = ce.getCause(); + if (t instanceof RuntimeException) { + throw (RuntimeException) t; + } else if (t instanceof Error) { + throw (Error) t; + } else { + throw new RuntimeException("Unexpected exception", t); + } + } + } + { setDaemon(true); // always a daemon } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 95d0d4d290ccd..a8aa6079c557f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -23,13 +23,15 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.HadoopThread; + /** * This class monitors the percentage of time the JVM is paused in GC within * the specified observation window, say 1 minute. The user can provide a * hook which will be called whenever this percentage exceeds the specified * threshold. */ -public class GcTimeMonitor extends Thread { +public class GcTimeMonitor extends HadoopThread { private final long maxGcTimePercentage; private final long observationWindowMs, sleepIntervalMs; @@ -151,7 +153,7 @@ public GcTimeMonitor(long observationWindowMs, long sleepIntervalMs, } @Override - public void run() { + public void work() { startTime = System.currentTimeMillis(); curData.timestamp = startTime; gcDataBuf[startIdx].setValues(startTime, 0); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index c53ddc0725ca2..d025af725a955 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; @@ -1020,9 +1021,9 @@ private void runCommand() throws IOException { // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new HadoopThread() { @Override - public void run() { + public void work() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java index
e85f850514b16..de521001d2a79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,9 +85,9 @@ public final class ShutdownHookManager { static { try { Runtime.getRuntime().addShutdownHook( - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { if (MGR.shutdownInProgress.getAndSet(true)) { LOG.info("Shutdown process invoked a second time: ignoring"); return; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java index 78e729b81d308..da3f9ef702e39 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java @@ -20,12 +20,18 @@ package org.apache.hadoop.util.concurrent; +import org.apache.hadoop.util.SubjectUtil; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.concurrent.Callable; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; /** An extension of ScheduledThreadPoolExecutor that provides additional * functionality. 
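+ *
+ * The schedule overrides below wrap every task with SubjectUtil.wrap, so a task runs with
+ * the Subject of the thread that scheduled it rather than the pool thread's. A minimal
+ * sketch of the intended effect (the int constructor and {@code subject} are assumptions):
+ * <pre>
+ * ScheduledThreadPoolExecutor pool = new HadoopScheduledThreadPoolExecutor(1);
+ * SubjectUtil.callAs(subject, () ->
+ *     pool.schedule(() -> doScheduledWork(), 1, TimeUnit.SECONDS));
+ * // doScheduledWork() then executes with {@code subject} as the current Subject
+ * </pre>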
*/ @@ -68,4 +74,53 @@ protected void afterExecute(Runnable r, Throwable t) { super.afterExecute(r, t); ExecutorHelper.logThrowableFromAfterExecute(r, t); } + + /** + * @throws RejectedExecutionException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + */ + @Override + public ScheduledFuture schedule(Runnable command, + long delay, + TimeUnit unit) { + return super.schedule(SubjectUtil.wrap(command), delay, unit); + } + + /** + * @throws RejectedExecutionException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + */ + @Override + public ScheduledFuture schedule(Callable callable, + long delay, + TimeUnit unit) { + return super.schedule(SubjectUtil.wrap(callable), delay, unit); + } + + /** + * @throws RejectedExecutionException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + @Override + public ScheduledFuture scheduleAtFixedRate(Runnable command, + long initialDelay, + long period, + TimeUnit unit) { + return super.scheduleAtFixedRate(SubjectUtil.wrap(command), initialDelay, period, unit); + } + + /** + * @throws RejectedExecutionException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + @Override + public ScheduledFuture scheduleWithFixedDelay(Runnable command, + long initialDelay, + long delay, + TimeUnit unit) { + return super.scheduleWithFixedDelay(SubjectUtil.wrap(command), initialDelay, delay, unit); + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java new file mode 100644 index 0000000000000..db9f2db517e5d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.concurrent; + +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; + +import javax.security.auth.Subject; + +import org.apache.hadoop.util.SubjectUtil; + +/** + * Helper class to restore Subject propagation behavior after the JEP 411/JEP 486 changes. + * + * Runnables can be specified normally, but the work() method has to be overridden instead of + * run() when subclassing.
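+ *
+ * A minimal usage sketch (hypothetical task body):
+ * <pre>
+ * Thread t = new HadoopThread(() -> doWork(), "worker");
+ * t.start(); // the Subject current at start() is restored around doWork()
+ * </pre>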
+ */ +public class HadoopThread extends Thread { + + Subject startSubject; + Runnable hadoopTarget; + + public HadoopThread() { + super(); + } + + public HadoopThread(Runnable target) { + super(); + this.hadoopTarget = target; + } + + public HadoopThread(ThreadGroup group, Runnable target) { + // The target passed to Thread has no effect; we pass it only + // because there is no super(group) constructor. + super(group, target); + this.hadoopTarget = target; + } + + public HadoopThread(Runnable target, String name) { + super(name); + this.hadoopTarget = target; + } + + public HadoopThread(String name) { + super(name); + } + + public HadoopThread(ThreadGroup group, String name) { + super(group, name); + } + + public HadoopThread(ThreadGroup group, Runnable target, String name) { + super(group, name); + this.hadoopTarget = target; + } + + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + + /** + * Override this instead of run(). + */ + public void work() { + throw new IllegalArgumentException("No Runnable was specified and work() is not overridden"); + } + + @Override + public final void run() { + try { + SubjectUtil.callAs(startSubject, new Callable() { + + @Override + public Void call() throws Exception { + if (hadoopTarget != null) { + hadoopTarget.run(); + } else { + work(); + } + return null; + } + + }); + } catch (CompletionException ce) { + Throwable t = ce.getCause(); + if (t instanceof RuntimeException) { + throw (RuntimeException) t; + } else if (t instanceof Error) { + throw (Error) t; + } else { + throw new RuntimeException("Unexpected exception", t); + } + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java index fa845b75e386c..caf839d36ce7b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.util.concurrent; +import org.apache.hadoop.util.SubjectUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,6 +77,11 @@ public HadoopThreadPoolExecutor(int corePoolSize, threadFactory, handler); } + @Override + public void execute(Runnable command) { + super.execute(SubjectUtil.wrap(command)); + } + @Override protected void beforeExecute(Thread t, Runnable r) { if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index fa0301b251765..313b5767295d3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -81,6 +81,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; @@ -2746,7 +2747,7 @@ private static Configuration checkCDATA(byte[] bytes) { @Test public void testConcurrentModificationDuringIteration() throws
InterruptedException { Configuration configuration = new Configuration(); - new Thread(() -> { + new HadoopThread(() -> { while (true) { configuration.set(String.valueOf(Math.random()), String.valueOf(Math.random())); } @@ -2754,7 +2755,7 @@ public void testConcurrentModificationDuringIteration() throws InterruptedExcept AtomicBoolean exceptionOccurred = new AtomicBoolean(false); - new Thread(() -> { + new HadoopThread(() -> { while (true) { try { configuration.iterator(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index c475f7c826bb3..d3c3b88afa802 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -22,6 +22,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -284,7 +285,7 @@ public void testReconfigure() { public void testThread() throws ReconfigurationException { ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); assertTrue(dummy.getConf().get(PROP1).equals(VAL1)); - Thread dummyThread = new Thread(dummy); + Thread dummyThread = new HadoopThread(dummy); dummyThread.start(); try { Thread.sleep(500); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index 7e6422512ea70..fd541afe122b4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -37,11 +37,11 @@ import java.net.UnknownHostException; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.concurrent.Callable; import javax.net.ssl.SSLException; import javax.net.ssl.SSLHandshakeException; @@ -935,9 +935,9 @@ private void testTokenSelectionWithConf(Configuration conf) throws Exception { token.setService(new Text(providerUriString)); // call getActualUgi() with the current user. 
UserGroupInformation actualUgi = - ugi.doAs(new PrivilegedExceptionAction(){ + ugi.callAs(new Callable(){ @Override - public UserGroupInformation run() throws Exception { + public UserGroupInformation call() throws Exception { final KeyProvider kp = new KMSClientProvider.Factory().createProvider(kmsUri, conf); final LoadBalancingKMSClientProvider lbkp = diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java index 713de5c72ca74..789afd8ea8d76 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java @@ -18,10 +18,10 @@ package org.apache.hadoop.fs; import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; +import java.util.concurrent.Callable; import org.apache.hadoop.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; @@ -183,10 +183,10 @@ public void testUgi() throws IOException, InterruptedException { UserGroupInformation otherUser = UserGroupInformation .createRemoteUser("otherUser"); - FileContext newFc = otherUser.doAs(new PrivilegedExceptionAction() { + FileContext newFc = otherUser.callAs(new Callable() { @Override - public FileContext run() throws Exception { + public FileContext call() throws Exception { FileContext newFc = FileContext.getFileContext(); return newFc; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 119bad41a3028..645efc405c4ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -21,9 +21,9 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -199,7 +199,7 @@ public void testCacheForUgi() throws Exception { */ private FileSystem getCachedFS(UserGroupInformation ugi, Configuration conf) throws IOException, InterruptedException { - return ugi.doAs((PrivilegedExceptionAction) + return ugi.callAs((Callable) () -> FileSystem.get(new URI("cachedfile://a"), conf)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java index 7bcc44e453a0e..ae4cd5a84b186 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java @@ -18,7 +18,8 @@ package org.apache.hadoop.fs; import java.io.IOException; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.HadoopTestBase; @@ -70,7 +71,7 @@ public void 
testEnclosingRootWrapped() throws Exception { assertEquals(root, fs.getEnclosingRoot(new Path("/foo/bar"))); UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo"); - Path p = ugi.doAs((PrivilegedExceptionAction) () -> { + Path p = ugi.callAs((Callable) () -> { FileSystem wFs = getFileSystem(); return wFs.getEnclosingRoot(new Path("/foo/bar")); }); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 89d7419f763d2..27bcc0c108485 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * This class tests commands from Trash. @@ -724,7 +725,7 @@ public void testTrashEmptier() throws Exception { // Start Emptier in background Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new HadoopThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -792,7 +793,7 @@ public void testTrashEmptierCleanDirNotInCheckpointDir() throws Exception { // Start Emptier in background. Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new HadoopThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -1049,7 +1050,7 @@ private void verifyAuditableTrashEmptier(Trash trash, Thread emptierThread = null; try { Runnable emptier = trash.getEmptier(); - emptierThread = new Thread(emptier); + emptierThread = new HadoopThread(emptier); emptierThread.start(); // Shutdown the emptier thread after a given time diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java index b47e5ea08c452..6fabc4c289a2f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java @@ -18,7 +18,8 @@ package org.apache.hadoop.fs.contract; import java.io.IOException; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; @@ -93,7 +94,7 @@ public void testEnclosingRootWrapped() throws Exception { "when the directory exists"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo"); - Path p = ugi.doAs((PrivilegedExceptionAction) () -> { + Path p = ugi.callAs((Callable) () -> { FileSystem wFs = getContract().getTestFileSystem(); return wFs.getEnclosingRoot(new Path("/foo/bar")); }); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 7d37bddf2a0b8..5d0211b798f90 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -22,7 +22,6 @@ import java.io.FileOutputStream; import java.io.IOException; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -30,6 +29,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -1526,9 +1526,9 @@ public void testCheckOwnerWithFileStatus() throws IOException, InterruptedException { final UserGroupInformation userUgi = UserGroupInformation .createUserForTesting("user@HADOOP.COM", new String[]{"hadoop"}); - userUgi.doAs(new PrivilegedExceptionAction() { + userUgi.callAs(new Callable() { @Override - public Object run() throws IOException { + public Object call() throws IOException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String doAsUserName = ugi.getUserName(); assertEquals("user@HADOOP.COM", doAsUserName); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index a664b8f1ae761..4ffda8e6210c0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -47,6 +47,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; @@ -966,9 +967,9 @@ public void testOwnerForInternalDir() throws IOException, InterruptedException, URISyntaxException { final UserGroupInformation userUgi = UserGroupInformation .createUserForTesting("user@HADOOP.COM", new String[]{"hadoop"}); - userUgi.doAs(new PrivilegedExceptionAction() { + userUgi.callAs(new Callable() { @Override - public Object run() throws IOException, URISyntaxException { + public Object call() throws IOException, URISyntaxException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String doAsUserName = ugi.getUserName(); assertEquals(doAsUserName, "user@HADOOP.COM"); @@ -1084,9 +1085,9 @@ static AbstractFileSystem getMockFs(URI uri) { public void testListStatusWithNoGroups() throws Exception { final UserGroupInformation userUgi = UserGroupInformation .createUserForTesting("user@HADOOP.COM", new String[] {}); - userUgi.doAs(new PrivilegedExceptionAction() { + userUgi.callAs(new Callable() { @Override - public Object run() throws Exception { + public Object call() throws Exception { URI viewFsUri = new URI( FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); FileSystem vfs = FileSystem.get(viewFsUri, conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 95c4abe5e3907..95a08dfd1299d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.io; 
+import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.nio.BufferUnderflowException; @@ -300,13 +301,13 @@ public void testTextText() throws CharacterCodingException { assertEquals(8, a.copyBytes().length); } - private class ConcurrentEncodeDecodeThread extends Thread { + private class ConcurrentEncodeDecodeThread extends HadoopThread { public ConcurrentEncodeDecodeThread(String name) { super(name); } @Override - public void run() { + public void work() { final String name = this.getName(); DataOutputBuffer out = new DataOutputBuffer(); DataInputBuffer in = new DataInputBuffer(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java index d44727b4b65b6..9db2ebbf8e0aa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -44,15 +45,15 @@ public class TestNativeIoInit { @Test @Timeout(value = 10) public void testDeadlockLinux() throws Exception { - Thread one = new Thread() { + Thread one = new HadoopThread() { @Override - public void run() { + public void work() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + Thread two = new HadoopThread() { @Override - public void run() { + public void work() { NativeIO.POSIX.isAvailable(); } }; @@ -72,9 +73,9 @@ public void run() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + Thread two = new HadoopThread() { @Override - public void run() { + public void work() { try { NativeIO.Windows.extendWorkingSetSize(100); } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java index 76db3b332225e..3860b5a3dbba5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java @@ -21,10 +21,10 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.NetworkInterface; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; +import java.util.concurrent.Callable; import org.apache.hadoop.test.GenericTestUtils; @@ -208,9 +208,9 @@ void connectToServerAndGetDelegationToken( MINI_USER, current, GROUP_NAMES); try { - client = proxyUserUgi.doAs(new PrivilegedExceptionAction() { + client = proxyUserUgi.callAs(new Callable() { @Override - public MiniProtocol run() throws IOException { + public MiniProtocol call() throws IOException { MiniProtocol p = RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf); Token token; @@ -236,9 +236,9 @@ long connectToServerUsingDelegationToken( try { long start = Time.now(); try { - client = currentUgi.doAs(new PrivilegedExceptionAction() { + client = currentUgi.callAs(new Callable() { @Override - public MiniProtocol run() throws IOException { + public MiniProtocol call() throws IOException { return 
RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index 1204aaa69b63c..e21167b93d0e9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -43,7 +43,7 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadMXBean; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicLong; /** @@ -352,9 +352,9 @@ private TestContext setupClientTestContext(final MyOptions opts) for (int i = 0; i < numProxies; i++) { proxies[i] = UserGroupInformation.createUserForTesting("proxy-" + i,new String[]{}) - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public RpcServiceWrapper run() throws Exception { + public RpcServiceWrapper call() throws Exception { return createRpcClient(opts); } }); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java index 1e0afe587ca96..4c76162d57bba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java @@ -97,7 +97,7 @@ static class AsyncCaller extends Thread { @Override public void run() { - // In case Thread#Start is called, which will spawn new thread. 
+ // in case Thread#Start is called, which will spawn new thread Client.setAsynchronousMode(true); for (int i = 0; i < count; i++) { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java index bc607d762a3cd..57bdd2c7047b5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -151,7 +152,7 @@ public void assertCanTake(CallQueueManager cq, int numberOfTakes, int takeAttempts) throws InterruptedException { Taker taker = new Taker(cq, takeAttempts, -1); - Thread t = new Thread(taker); + Thread t = new HadoopThread(taker); t.start(); t.join(100); @@ -164,7 +165,7 @@ public void assertCanPut(CallQueueManager cq, int numberOfPuts, int putAttempts) throws InterruptedException { Putter putter = new Putter(cq, putAttempts, -1); - Thread t = new Thread(putter); + Thread t = new HadoopThread(putter); t.start(); t.join(100); @@ -277,7 +278,7 @@ public void testSwapUnderContention() throws InterruptedException { // Create putters and takers for (int i=0; i < 1000; i++) { Putter p = new Putter(manager, -1, -1); - Thread pt = new Thread(p); + Thread pt = new HadoopThread(p); producers.add(p); threads.put(p, pt); @@ -286,7 +287,7 @@ public void testSwapUnderContention() throws InterruptedException { for (int i=0; i < 100; i++) { Taker t = new Taker(manager, -1, -1); - Thread tt = new Thread(t); + Thread tt = new HadoopThread(t); consumers.add(t); threads.put(t, tt); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java index 1afc88c562c8e..eb4c496e7c786 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java @@ -50,6 +50,7 @@ import java.util.List; import java.util.concurrent.BlockingQueue; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; @@ -684,7 +685,7 @@ public void assertCanTake(BlockingQueue cq, int numberOfTakes, CountDownLatch latch = new CountDownLatch(numberOfTakes); Taker taker = new Taker(cq, takeAttempts, "default", latch); - Thread t = new Thread(taker); + Thread t = new HadoopThread(taker); t.start(); latch.await(); @@ -698,7 +699,7 @@ public void assertCanPut(BlockingQueue cq, int numberOfPuts, CountDownLatch latch = new CountDownLatch(numberOfPuts); Putter putter = new Putter(cq, putAttempts, null, latch); - Thread t = new Thread(putter); + Thread t = new HadoopThread(putter); t.start(); latch.await(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index a191095b44516..1f30fe38fa627 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -103,6 +103,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -996,7 +997,7 @@ private void checkBlocking(int readers, int readerQ, int callQ) throws Exception // instantiate the threads, will start in batches Thread[] threads = new Thread[clients]; for (int i=0; i remoteUserCall = newExtCall(ugi, - new PrivilegedExceptionAction() { + new Callable() { @Override - public String run() throws Exception { + public String call() throws Exception { return UserGroupInformation.getCurrentUser().getUserName(); } }); ExternalCall exceptionCall = newExtCall(ugi, - new PrivilegedExceptionAction() { + new Callable() { @Override - public String run() throws Exception { + public String call() throws Exception { throw expectedIOE; } }); @@ -1250,9 +1250,9 @@ public String run() throws Exception { final CyclicBarrier barrier = new CyclicBarrier(2); ExternalCall barrierCall = newExtCall(ugi, - new PrivilegedExceptionAction() { + new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { // notify we are in a handler and then wait to keep the callq // plugged up latch.countDown(); @@ -1291,7 +1291,7 @@ public Void run() throws Exception { } private ExternalCall newExtCall(UserGroupInformation ugi, - PrivilegedExceptionAction callable) { + Callable callable) { return new ExternalCall(callable) { @Override public String getProtocol() { @@ -1320,8 +1320,8 @@ public void testRpcMetrics() throws Exception { UserGroupInformation anotherUser = UserGroupInformation.createRemoteUser(testUser); TestRpcService proxy2 = - anotherUser.doAs(new PrivilegedAction() { - public TestRpcService run() { + anotherUser.callAs(new Callable() { + public TestRpcService call() { try { return RPC.getProxy(TestRpcService.class, 0, server.getListenerAddress(), conf); @@ -2006,7 +2006,7 @@ public void testRpcMetricsInNanos() throws Exception { UserGroupInformation anotherUser = UserGroupInformation.createRemoteUser(testUser); TestRpcService proxy2 = - anotherUser.doAs((PrivilegedAction) () -> { + anotherUser.callAs((Callable) () -> { try { return RPC.getProxy(TestRpcService.class, 0, server.getListenerAddress(), conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java index f30a6165edc16..b4218bcaeb5f9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java @@ -34,6 +34,7 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -116,7 +117,7 @@ public void testDeferredResponse() throws 
IOException, InterruptedException, new ClientCallable(serverAddress, conf, requestBytes); FutureTask future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new HadoopThread(future); clientThread.start(); server.awaitInvocation(); @@ -146,7 +147,7 @@ public void testDeferredException() throws IOException, InterruptedException, new ClientCallable(serverAddress, conf, requestBytes); FutureTask future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new HadoopThread(future); clientThread.start(); server.awaitInvocation(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 68fa10c6c1466..bca8a02ec21e6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -56,7 +56,6 @@ import java.lang.annotation.Annotation; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; import java.security.Security; import java.util.ArrayList; import java.util.Collection; @@ -621,7 +620,7 @@ public void testClientFallbackToSimpleAuthForASecondClient(QualityOfProtection[] AtomicBoolean fallbackToSimpleAuth2 = new AtomicBoolean(); try { LOG.info("trying ugi:"+ clientUgi +" tokens:"+ clientUgi.getTokens()); - clientUgi.doAs((PrivilegedExceptionAction) () -> { + clientUgi.callAs((Callable) () -> { TestRpcService proxy1 = null; TestRpcService proxy2 = null; try { @@ -862,9 +861,9 @@ public void testSaslResponseOrdering(QualityOfProtection[] pQop, Token token = new Token<>(tokenId, sm); SecurityUtil.setTokenService(token, addr); clientUgi.addToken(token); - clientUgi.doAs(new PrivilegedExceptionAction() { + clientUgi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { final TestRpcService proxy = getClient(addr, conf); final ExecutorService executor = Executors.newCachedThreadPool(); final AtomicInteger count = new AtomicInteger(); @@ -988,9 +987,9 @@ private SecretManager createServerSecretManager( private Server startServer(Configuration serverConf, UserGroupInformation serverUgi, SecretManager serverSm) throws IOException, InterruptedException { - Server server = serverUgi.doAs(new PrivilegedExceptionAction() { + Server server = serverUgi.callAs(new Callable() { @Override - public Server run() throws IOException { + public Server call() throws IOException { return setupTestServer(serverConf, 5, serverSm); } }); @@ -1048,9 +1047,9 @@ private String createClientAndQueryAuthMethod(InetSocketAddress serverAddress, Configuration clientConf, UserGroupInformation clientUgi, AtomicBoolean fallbackToSimpleAuth) throws IOException, InterruptedException { LOG.info("trying ugi:"+ clientUgi +" tokens:"+ clientUgi.getTokens()); - return clientUgi.doAs(new PrivilegedExceptionAction() { + return clientUgi.callAs(new Callable() { @Override - public String run() throws IOException { + public String call() throws IOException { TestRpcService proxy = null; try { proxy = getClient(serverAddress, clientConf, null, fallbackToSimpleAuth); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java index 51c66abb3fc26..74529bd89fe61 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java @@ -38,6 +38,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.SocksSocketFactory; import org.apache.hadoop.net.StandardSocketFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -63,7 +64,7 @@ public class TestSocketFactory { private void startTestServer() throws Exception { // start simple tcp server. serverRunnable = new ServerRunnable(); - serverThread = new Thread(serverRunnable); + serverThread = new HadoopThread(serverRunnable); serverThread.start(); final long timeout = System.currentTimeMillis() + START_STOP_TIMEOUT_SEC * 1000; while (!serverRunnable.isReady()) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index a1704a0ec3013..2f3de132bd45e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -47,7 +47,7 @@ import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -783,7 +783,7 @@ public void run() { } } }; - Thread readerThread = new Thread(reader); + Thread readerThread = new HadoopThread(reader); readerThread.start(); socks[0].getOutputStream().write(1); socks[0].getOutputStream().write(2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java index f78005a6ed3f2..6d7a5a163fc49 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java @@ -32,6 +32,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -128,7 +129,7 @@ public void testStress() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -155,7 +156,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new HadoopThread(new Runnable() { @Override public void run() { final Random random = new Random(); @@ -199,7 +200,7 @@ public void testStressInterruption() throws Exception { final ArrayList pairs = new 
ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -227,7 +228,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new HadoopThread(new Runnable() { @Override public void run() { final Random random = new Random(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index fafe0bc903d31..df8fd4cb31ecd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -38,9 +38,9 @@ import java.io.IOException; import java.net.InetAddress; import java.net.NetworkInterface; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Enumeration; +import java.util.concurrent.Callable; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.fail; @@ -117,9 +117,9 @@ public void testCreateProxyUser() throws Exception { UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUser( PROXY_USER_NAME, realUserUgi); UserGroupInformation curUGI = proxyUserUgi - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public UserGroupInformation run() throws IOException { + public UserGroupInformation call() throws IOException { return UserGroupInformation.getCurrentUser(); } }); @@ -130,9 +130,9 @@ public UserGroupInformation run() throws IOException { private void checkRemoteUgi(final UserGroupInformation ugi, final Configuration conf) throws Exception { - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws ServiceException { + public Void call() throws ServiceException { client = getClient(addr, conf); String currentUser = client.getCurrentUser(null, newEmptyRequest()).getUser(); @@ -233,9 +233,9 @@ public void testRealUserIPAuthorizationFailure() throws IOException { UserGroupInformation proxyUserUgi = UserGroupInformation .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public String run() throws ServiceException { + public String call() throws ServiceException { client = getClient(addr, conf); return client.getCurrentUser(null, newEmptyRequest()).getUser(); @@ -269,9 +269,9 @@ public void testRealUserIPNotSpecified() throws IOException { UserGroupInformation proxyUserUgi = UserGroupInformation .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public String run() throws ServiceException { + public String call() throws ServiceException { client = getClient(addr, conf); return client.getCurrentUser(null, newEmptyRequest()).getUser(); @@ -302,9 +302,9 @@ public void testRealUserGroupNotSpecified() throws IOException { UserGroupInformation proxyUserUgi = UserGroupInformation .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi - 
.doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public String run() throws ServiceException { + public String call() throws ServiceException { client = getClient(addr, conf); return client.getCurrentUser(null, newEmptyRequest()).getUser(); @@ -340,9 +340,9 @@ public void testRealUserGroupAuthorizationFailure() throws IOException { UserGroupInformation proxyUserUgi = UserGroupInformation .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public String run() throws ServiceException { + public String call() throws ServiceException { client = getClient(addr, conf); return client.getCurrentUser(null, newEmptyRequest()).getUser(); @@ -386,9 +386,9 @@ public void testProxyWithToken() throws Exception { refreshConf(conf); - String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction() { + String retVal = proxyUserUgi.callAs(new Callable() { @Override - public String run() throws Exception { + public String call() throws Exception { try { client = getClient(addr, conf); return client.getCurrentUser(null, @@ -430,9 +430,9 @@ public void testTokenBySuperUser() throws Exception { Token token = new Token<>(tokenId, sm); SecurityUtil.setTokenService(token, addr); current.addToken(token); - String retVal = current.doAs(new PrivilegedExceptionAction() { + String retVal = current.callAs(new Callable() { @Override - public String run() throws Exception { + public String call() throws Exception { try { client = getClient(addr, newConf); return client.getCurrentUser(null, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java index 9847f05f1a0f2..8c0d4da2e5fac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java @@ -22,9 +22,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; -import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; @@ -80,10 +80,10 @@ public void test() throws Exception { UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFile.getCanonicalPath()); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server1Protocol, host, props, null); @@ -115,10 +115,10 @@ public Void run() throws Exception { + "please reconsider the problem in HADOOP-13433"); // should fail as we send a service ticket instead of tgt to KDC. 
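// A minimal sketch of the UserGroupInformation.doAs -> callAs rewrite applied
// throughout these tests, in isolation. It assumes, as the LambdaTestUtils
// change further below suggests, that callAs(Callable<T>) keeps the
// IOException/InterruptedException contract of doAs(PrivilegedExceptionAction).
// The user names here are placeholders.
import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class CallAsMigrationSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    UserGroupInformation realUser = UserGroupInformation.createRemoteUser("real.user");
    UserGroupInformation proxyUser =
        UserGroupInformation.createProxyUser("proxy.user", realUser);
    // Before: proxyUser.doAs(new PrivilegedExceptionAction<String>() { public String run() ... })
    // After: a Callable<String>, so the body declares call() instead of run().
    String current = proxyUser.callAs(
        () -> UserGroupInformation.getCurrentUser().getUserName());
    System.out.println("ran as: " + current);
  }
}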
intercept(SaslException.class, - () -> ugi.doAs(new PrivilegedExceptionAction() { + () -> ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server2Protocol, host, props, null); @@ -139,10 +139,10 @@ public Void run() throws Exception { "The first ticket is not tgt"); // make sure we can still get new service ticket after the fix. - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server2Protocol, host, props, null); @@ -163,10 +163,10 @@ public void testWithDestroyedTGT() throws Exception { UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFile.getCanonicalPath()); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server1Protocol, host, props, null); @@ -199,10 +199,10 @@ public Void run() throws Exception { // should fail as we send a service ticket instead of tgt to KDC. intercept(SaslException.class, - () -> ugi.doAs(new PrivilegedExceptionAction() { + () -> ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server2Protocol, host, props, null); @@ -216,10 +216,10 @@ public Void run() throws Exception { ugi.reloginFromKeytab(); // make sure we can get new service ticket after the relogin. - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, server2Protocol, host, props, null); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java index 1bb43ffcd5eb5..464a1ae4878bd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java @@ -59,7 +59,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.JavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -414,7 +414,7 @@ public void testLdapConnectionTimeout() // Below we create a LDAP server which will accept a client request; // but it will never reply to the bind (connect) request. // Client of this LDAP server is expected to get a connection timeout. 
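// A minimal sketch of the Thread -> HadoopThread swap made in these tests:
// the construction sites change only in the class name, so the Runnable-based
// constructor is assumed to mirror java.lang.Thread's.
import org.apache.hadoop.util.concurrent.HadoopThread;

public class HadoopThreadSwapSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new HadoopThread(
        () -> System.out.println("running in " + Thread.currentThread().getName()));
    worker.start();
    worker.join();
  }
}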
- final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -469,7 +469,7 @@ public void testLdapReadTimeout() throws IOException, InterruptedException { // authenticate it successfully; but it will never reply to the following // query request. // Client of this LDAP server is expected to get a read timeout. - final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRaceWhenRelogin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRaceWhenRelogin.java index 473afcc6c44a7..f8cad178dfb4a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRaceWhenRelogin.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRaceWhenRelogin.java @@ -21,10 +21,10 @@ import java.io.File; import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; @@ -114,10 +114,10 @@ private void relogin(AtomicBoolean pass) { private void getServiceTicket(AtomicBoolean running, String serverProtocol) { while (running.get()) { try { - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { SaslClient client = Sasl.createSaslClient( new String[] {AuthMethod.KERBEROS.getMechanismName()}, clientPrincipal, serverProtocol, host, props, null); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java index f5eff7065c865..2f1530add6760 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java @@ -50,7 +50,6 @@ import java.io.IOException; import java.nio.file.Path; import java.security.Principal; -import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.Set; import java.util.concurrent.Callable; @@ -355,9 +354,9 @@ public void testReloginForUGIFromSubject() throws Exception { principal1.getName(), keytab1.getPath()); final UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); - loginUser.doAs(new PrivilegedExceptionAction() { + loginUser.callAs(new Callable() { @Override - public Void run() throws IOException { + public Void call() throws IOException { KerberosTicket loginTicket = checkTicketAndKeytab(loginUser, principal1, true); @@ -405,9 +404,9 @@ public void testReloginForLoginFromSubject() throws Exception { UserGroupInformation.getLoginUser(); assertNotNull(getUser(originalLoginUser.getSubject()).getLogin()); - originalLoginUser.doAs(new PrivilegedExceptionAction() { + originalLoginUser.callAs(new Callable() { @Override - public Void run() throws IOException { + public Void call() throws IOException { KerberosTicket originalLoginUserTicket = checkTicketAndKeytab(originalLoginUser, principal1, true); @@ -560,18 +559,18 @@ 
public UserGroupInformation call() throws Exception { // concurrent UGI instantiation should not block and again should // know it's supposed to be from a keytab. - loginUgi.doAs(new PrivilegedExceptionAction<Void>(){ + loginUgi.callAs(new Callable<Void>(){ @Override - public Void run() throws Exception { + public Void call() throws Exception { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); assertEquals(principal.getName(), ugi.getUserName()); assertTrue(ugi.isFromKeytab()); return null; } }); - clonedUgi.doAs(new PrivilegedExceptionAction<Void>(){ + clonedUgi.callAs(new Callable<Void>(){ @Override - public Void run() throws Exception { + public Void call() throws Exception { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); assertEquals(principal.getName(), ugi.getUserName()); assertTrue(ugi.isFromKeytab()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 2fb5b6c22eb71..bc3a2bfd66cf4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -32,6 +32,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.SubjectUtil; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -57,7 +58,6 @@ import java.io.InputStreamReader; import java.lang.reflect.Method; import java.security.Principal; -import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Date; @@ -241,9 +241,9 @@ public void testLogin() throws Exception { UserGroupInformation userGroupInfo = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES); UserGroupInformation curUGI = - userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){ + userGroupInfo.callAs(new Callable<UserGroupInformation>(){ @Override - public UserGroupInformation run() throws IOException { + public UserGroupInformation call() throws IOException { return UserGroupInformation.getCurrentUser(); }}); // make sure in the scope of the doAs, the right user is current @@ -306,9 +306,9 @@ public void testGetServerSideGroups() throws IOException, final UserGroupInformation fakeUser = UserGroupInformation.createRemoteUser("foo.bar"); - fakeUser.doAs(new PrivilegedExceptionAction<Object>(){ + fakeUser.callAs(new Callable<Object>(){ @Override - public Object run() throws IOException { + public Object call() throws IOException { UserGroupInformation current = UserGroupInformation.getCurrentUser(); assertFalse(current.equals(login)); assertEquals(current, fakeUser); @@ -711,9 +711,9 @@ public void testUGITokens() throws Exception { // ensure that the tokens are passed through doAs Collection<Token<? extends TokenIdentifier>> otherSet = - ugi.doAs(new PrivilegedExceptionAction<Collection<Token<? extends TokenIdentifier>>>(){ + ugi.callAs(new Callable<Collection<Token<? extends TokenIdentifier>>>(){ @Override - public Collection<Token<? extends TokenIdentifier>> run() throws IOException { + public Collection<Token<? extends TokenIdentifier>> call() throws IOException { return UserGroupInformation.getCurrentUser().getTokens(); } }); @@ -739,9 +739,9 @@ public void testTokenIdentifiers() throws Exception { // ensure that the token identifiers are passed through doAs Collection<TokenIdentifier> otherSet = ugi - .doAs(new PrivilegedExceptionAction<Collection<TokenIdentifier>>() { + .callAs(new Callable<Collection<TokenIdentifier>>() { @Override -
public Collection<TokenIdentifier> run() throws IOException { + public Collection<TokenIdentifier> call() throws IOException { return UserGroupInformation.getCurrentUser().getTokenIdentifiers(); } }); @@ -770,9 +770,9 @@ public void testUGIAuthMethod() throws Exception { final AuthenticationMethod am = AuthenticationMethod.KERBEROS; ugi.setAuthenticationMethod(am); assertEquals(am, ugi.getAuthenticationMethod()); - ugi.doAs(new PrivilegedExceptionAction<Object>() { + ugi.callAs(new Callable<Object>() { @Override - public Object run() throws IOException { + public Object call() throws IOException { assertEquals(am, UserGroupInformation.getCurrentUser() .getAuthenticationMethod()); return null; @@ -793,9 +793,9 @@ public void testUGIAuthMethodInRealUser() throws Exception { proxyUgi.getAuthenticationMethod()); assertEquals(am, UserGroupInformation .getRealAuthenticationMethod(proxyUgi)); - proxyUgi.doAs(new PrivilegedExceptionAction<Object>() { + proxyUgi.callAs(new Callable<Object>() { @Override - public Object run() throws IOException { + public Object call() throws IOException { assertEquals(AuthenticationMethod.PROXY, UserGroupInformation .getCurrentUser().getAuthenticationMethod()); assertEquals(am, UserGroupInformation.getCurrentUser() @@ -884,9 +884,9 @@ private static void verifyGroupMetrics( @Timeout(value = 30) public void testUGIUnderNonHadoopContext() throws Exception { Subject nonHadoopSubject = new Subject(); - Subject.doAs(nonHadoopSubject, new PrivilegedExceptionAction<Void>() { + SubjectUtil.callAs(nonHadoopSubject, new Callable<Void>() { @Override - public Void run() throws IOException { + public Void call() throws IOException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); assertNotNull(ugi); return null; @@ -992,9 +992,9 @@ public void testPrivateTokenExclusion() throws Exception { public void testTokenRaceCondition() throws Exception { UserGroupInformation userGroupInfo = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES); - userGroupInfo.doAs(new PrivilegedExceptionAction<Void>(){ + userGroupInfo.callAs(new Callable<Void>(){ @Override - public Void run() throws Exception { + public Void call() throws Exception { // make sure it is not the same as the login user because we use the // same UGI object for every instantiation of the login user and you // won't run into the race condition otherwise @@ -1098,9 +1098,9 @@ public void testCheckTGTAfterLoginFromSubject() throws Exception { subject.getPrivateCredentials().add(keytab); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - ugi.doAs(new PrivilegedExceptionAction<Void>() { + ugi.callAs(new Callable<Void>() { @Override - public Void run() throws IOException { + public Void call() throws IOException { UserGroupInformation.loginUserFromSubject(subject); // this should not throw. UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); @@ -1226,10 +1226,10 @@ public String answer(InvocationOnMock invocation) throws Throwable { new Callable<UserGroupInformation>(){ @Override public UserGroupInformation call() throws Exception { - return testUgi1.doAs( - new PrivilegedExceptionAction<UserGroupInformation>() { + return testUgi1.callAs( + new Callable<UserGroupInformation>() { @Override - public UserGroupInformation run() throws Exception { + public UserGroupInformation call() throws Exception { return UserGroupInformation.getCurrentUser(); } }); @@ -1244,19 +1244,19 @@ public UserGroupInformation run() throws Exception { principals.add(user); // concurrent getCurrentUser on ugi1 should not be blocked.
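// A minimal sketch of the Subject.doAs -> SubjectUtil.callAs translation used
// above: callAs takes a Callable instead of a PrivilegedExceptionAction and,
// as the catch blocks in TestWebDelegationToken and KerberosTestUtils below
// show, surfaces checked failures as CompletionException rather than
// PrivilegedActionException, so callers unwrap getCause().
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;
import javax.security.auth.Subject;
import org.apache.hadoop.util.SubjectUtil;

public class SubjectCallAsSketch {
  public static void main(String[] args) throws Exception {
    Subject subject = new Subject();
    try {
      String greeting = SubjectUtil.callAs(subject, new Callable<String>() {
        @Override
        public String call() throws Exception {
          return "hello from inside the subject context";
        }
      });
      System.out.println(greeting);
    } catch (CompletionException ex) {
      // Restore the caller-visible checked exception, as the tests do.
      Throwable cause = ex.getCause();
      if (cause instanceof Exception) {
        throw (Exception) cause;
      }
      throw new RuntimeException(cause);
    }
  }
}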
UserGroupInformation ugi; - ugi = testUgi1.doAs( - new PrivilegedExceptionAction() { + ugi = testUgi1.callAs( + new Callable() { @Override - public UserGroupInformation run() throws Exception { + public UserGroupInformation call() throws Exception { return UserGroupInformation.getCurrentUser(); } }); assertSame(testUgi1.getSubject(), ugi.getSubject()); // concurrent getCurrentUser on ugi2 should not be blocked. - ugi = testUgi2.doAs( - new PrivilegedExceptionAction() { + ugi = testUgi2.callAs( + new Callable() { @Override - public UserGroupInformation run() throws Exception { + public UserGroupInformation call() throws Exception { return UserGroupInformation.getCurrentUser(); } }); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java index 25756dd427794..6e16644226818 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java @@ -33,6 +33,7 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.SubjectUtil; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.servlet.ServletContextHandler; @@ -65,8 +66,6 @@ import java.net.URL; import java.nio.charset.StandardCharsets; import java.security.Principal; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -76,6 +75,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -417,9 +417,9 @@ private void testDelegationTokenAuthenticatorCalls(final boolean useQS) UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); ugi.addToken(token.getDelegationToken()); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token()); assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); @@ -551,9 +551,9 @@ private void testDelegationTokenAuthenticatedURLWithNoDT( final URL url = new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl = @@ -621,9 +621,9 @@ public void testFallbackToPseudoDelegationTokenAuthenticator() final URL url = new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new 
Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl = @@ -709,8 +709,14 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) { } } + @Deprecated public static T doAsKerberosUser(String principal, String keytab, final Callable callable) throws Exception { + return callAsKerberosUser(principal, keytab, callable); + } + + public static T callAsKerberosUser(String principal, String keytab, + final Callable callable) throws Exception { LoginContext loginContext = null; try { Set principals = new HashSet(); @@ -721,14 +727,19 @@ public static T doAsKerberosUser(String principal, String keytab, new KerberosConfiguration(principal, keytab)); loginContext.login(); subject = loginContext.getSubject(); - return Subject.doAs(subject, new PrivilegedExceptionAction() { + return SubjectUtil.callAs(subject, new Callable() { @Override - public T run() throws Exception { + public T call() throws Exception { return callable.call(); } }); - } catch (PrivilegedActionException ex) { - throw ex.getException(); + } catch (CompletionException ex) { + Throwable cause = ex.getCause(); + if (cause instanceof Exception) { + throw (Exception) cause; + } else { + throw new RuntimeException(cause); + } } finally { if (loginContext != null) { loginContext.logout(); @@ -866,9 +877,9 @@ public void testProxyUser() throws Exception { assertEquals(OK_USER, ret.get(0)); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl = @@ -949,9 +960,9 @@ public void testHttpUGI() throws Exception { final URL url = new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl = @@ -1011,9 +1022,9 @@ public void testIpaddressCheck() throws Exception { final URL url = new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl = diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java index ccbc0a009fbf5..0121d44678e6e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java @@ -25,6 +25,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceStateChangeListener; import 
org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -404,7 +405,7 @@ private AsyncSelfTerminatingService(int timeout) { @Override protected void serviceStart() throws Exception { - new Thread(this).start(); + new HadoopThread(this).start(); super.serviceStart(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java index 3093aa1ff5d58..6d508c668ca0b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +59,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - Thread thread = new Thread(this); + Thread thread = new HadoopThread(this); thread.setName(getName()); thread.start(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java index e0f3c87518993..fc9fe066f9645 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java @@ -785,7 +785,7 @@ public static T notNull(String message, Callable eval) */ public static T doAs(UserGroupInformation user, Callable eval) throws IOException, InterruptedException { - return user.doAs(new PrivilegedOperation<>(eval)); + return user.callAs(eval); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java index fb9e773c7d06e..6e7d0e36a93c3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -253,7 +254,7 @@ void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) { private Thread lockUnlockThread(Lock lock) throws InterruptedException { CountDownLatch countDownLatch = new CountDownLatch(1); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { assertFalse(lock.tryLock()); countDownLatch.countDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java index dd8bc45dc9dc2..be171cb405803 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.util; -import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.crypto.OpensslCipher; import org.apache.hadoop.io.compress.zlib.ZlibFactory; -import org.apache.hadoop.util.NativeCodeLoader; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java index 6bad38b0e1b48..648d70143120d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.util; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import java.util.concurrent.ScheduledThreadPoolExecutor; @@ -39,7 +40,7 @@ public void run() { @Test @Timeout(value = 3) public void testShutdownThread() { - Thread thread = new Thread(sampleRunnable); + Thread thread = new HadoopThread(sampleRunnable); thread.start(); boolean ret = ShutdownThreadsHelper.shutdownThread(thread); boolean isTerminated = !thread.isAlive(); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index 8fa8d91195441..33d8ff4476bd4 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -51,12 +51,12 @@ import java.io.IOException; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.EnumSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import static org.apache.hadoop.crypto.key.kms.server.KMSACLs.INVALIDATE_CACHE_TYPES; import static org.apache.hadoop.util.KMSUtil.checkNotEmpty; @@ -163,10 +163,10 @@ public Response createKey(Map jsonKey) throws Exception { options.setDescription(description); options.setAttributes(attributes); - KeyProvider.KeyVersion keyVersion = user.doAs( - new PrivilegedExceptionAction() { + KeyProvider.KeyVersion keyVersion = user.callAs( + new Callable() { @Override - public KeyVersion run() throws Exception { + public KeyVersion call() throws Exception { KeyProvider.KeyVersion keyVersion = (material != null) ? 
provider.createKey(name, Base64.decodeBase64(material), options) @@ -205,9 +205,9 @@ public Response deleteKey(@PathParam("name") final String name) assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name); checkNotEmpty(name, "name"); LOG.debug("Deleting key with name {}.", name); - user.doAs(new PrivilegedExceptionAction() { + user.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { provider.deleteKey(name); provider.flush(); return null; @@ -243,10 +243,10 @@ public Response rolloverKey(@PathParam("name") final String name, KMSOp.ROLL_NEW_VERSION, name); } - KeyProvider.KeyVersion keyVersion = user.doAs( - new PrivilegedExceptionAction() { + KeyProvider.KeyVersion keyVersion = user.callAs( + new Callable() { @Override - public KeyVersion run() throws Exception { + public KeyVersion call() throws Exception { KeyVersion keyVersion = (material != null) ? provider.rollNewVersion(name, Base64.decodeBase64(material)) @@ -287,9 +287,9 @@ public Response invalidateCache(@PathParam("name") final String name) assertAccess(INVALIDATE_CACHE_TYPES, user, KMSOp.INVALIDATE_CACHE, name); LOG.debug("Invalidating cache with key name {}.", name); - user.doAs(new PrivilegedExceptionAction() { + user.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { provider.invalidateCache(name); provider.flush(); return null; @@ -318,10 +318,10 @@ public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY) new String[keyNamesList.size()]); assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA); - KeyProvider.Metadata[] keysMeta = user.doAs( - new PrivilegedExceptionAction() { + KeyProvider.Metadata[] keysMeta = user.callAs( + new Callable() { @Override - public KeyProvider.Metadata[] run() throws Exception { + public KeyProvider.Metadata[] call() throws Exception { return provider.getKeysMetadata(keyNames); } } @@ -348,10 +348,10 @@ public Response getKeyNames() throws Exception { UserGroupInformation user = HttpUserGroupInformation.get(); assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS); - List json = user.doAs( - new PrivilegedExceptionAction>() { + List json = user.callAs( + new Callable>() { @Override - public List run() throws Exception { + public List call() throws Exception { return provider.getKeys(); } } @@ -396,10 +396,10 @@ public Response getMetadata(@PathParam("name") final String name) assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name); LOG.debug("Getting metadata for key with name {}.", name); - KeyProvider.Metadata metadata = user.doAs( - new PrivilegedExceptionAction() { + KeyProvider.Metadata metadata = user.callAs( + new Callable() { @Override - public KeyProvider.Metadata run() throws Exception { + public KeyProvider.Metadata call() throws Exception { return provider.getMetadata(name); } } @@ -430,10 +430,10 @@ public Response getCurrentVersion(@PathParam("name") final String name) assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name); LOG.debug("Getting key version for key with name {}.", name); - KeyVersion keyVersion = user.doAs( - new PrivilegedExceptionAction() { + KeyVersion keyVersion = user.callAs( + new Callable() { @Override - public KeyVersion run() throws Exception { + public KeyVersion call() throws Exception { return provider.getCurrentKey(name); } } @@ -463,10 +463,10 @@ public Response getKeyVersion( assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION); LOG.debug("Getting key with version 
name {}.", versionName); - KeyVersion keyVersion = user.doAs( - new PrivilegedExceptionAction() { + KeyVersion keyVersion = user.callAs( + new Callable() { @Override - public KeyVersion run() throws Exception { + public KeyVersion call() throws Exception { return provider.getKeyVersion(versionName); } } @@ -513,10 +513,10 @@ public Response generateEncryptedKeys( new LinkedList(); try { - user.doAs( - new PrivilegedExceptionAction() { + user.callAs( + new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { LOG.debug("Generated Encrypted key for {} number of " + "keys.", numKeys); for (int i = 0; i < numKeys; i++) { @@ -591,9 +591,9 @@ public Response reencryptEncryptedKeys( "All EncryptedKeys must be under the given key name " + name); } - user.doAs(new PrivilegedExceptionAction() { + user.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { provider.reencryptEncryptedKeys(ekvs); return null; } @@ -649,10 +649,10 @@ public Response handleEncryptedKeyOp( assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName); - KeyProvider.KeyVersion retKeyVersion = user.doAs( - new PrivilegedExceptionAction() { + KeyProvider.KeyVersion retKeyVersion = user.callAs( + new Callable() { @Override - public KeyVersion run() throws Exception { + public KeyVersion call() throws Exception { return provider.decryptEncryptedKey( new KMSClientProvider.KMSEncryptedKeyVersion( keyName, versionName, iv, @@ -671,9 +671,9 @@ public KeyVersion run() throws Exception { keyName); EncryptedKeyVersion retEncryptedKeyVersion = - user.doAs(new PrivilegedExceptionAction() { + user.callAs(new Callable() { @Override - public EncryptedKeyVersion run() throws Exception { + public EncryptedKeyVersion call() throws Exception { return provider.reencryptEncryptedKey( new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName, iv, KeyProviderCryptoExtension.EEK, @@ -717,10 +717,10 @@ public Response getKeyVersions(@PathParam("name") final String name) assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name); LOG.debug("Getting key versions for key {}", name); - List ret = user.doAs( - new PrivilegedExceptionAction>() { + List ret = user.callAs( + new Callable>() { @Override - public List run() throws Exception { + public List call() throws Exception { return provider.getKeyVersions(name); } } diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index e99c49f7dc6a8..113e5bf84c4c1 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -29,6 +29,7 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xbill.DNS.CNAMERecord; @@ -174,7 +175,7 @@ public RegistryDNS(String name) { @Override public Thread newThread(Runnable r) { - return new Thread(r, + return new HadoopThread(r, "RegistryDNS " + counter.getAndIncrement()); } diff --git 
a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java index 7fe6f2d0afdd7..64dd6116115b2 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java @@ -37,6 +37,7 @@ import org.apache.hadoop.registry.client.types.RegistryPathStatus; import org.apache.hadoop.registry.client.types.ServiceRecord; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; @@ -115,7 +116,7 @@ public RegistryAdminService(String name, @Override public Thread newThread(Runnable r) { - return new Thread(r, + return new HadoopThread(r, "RegistryAdminService " + counter.getAndIncrement()); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 8d13640eadb18..7caa88d6d65f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -693,7 +693,7 @@ private boolean shouldStop() { * and closes them. Any error recovery is also done by this thread. */ @Override - public void run() { + public void work() { TraceScope scope = null; while (!streamerClosed && dfsClient.clientRunning) { // if the Responder encountered an error, shutdown Responder @@ -1167,7 +1167,7 @@ private class ResponseProcessor extends Daemon { } @Override - public void run() { + public void work() { setName("ResponseProcessor for block " + block); PipelineAck ack = new PipelineAck(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java index 465497ffb9b60..154eacb5cd3ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -250,7 +251,7 @@ public DeadNodeDetector(String name, Configuration conf) { } @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { clearAndGetDetectedDeadNodes(); LOG.debug("Current detector state {}, the detected nodes: {}.", state, @@ -322,12 +323,12 @@ static void setDisabledProbeThreadForTest( @VisibleForTesting void startProbeScheduler() { probeDeadNodesSchedulerThr = - new Thread(new ProbeScheduler(this, ProbeType.CHECK_DEAD)); + new HadoopThread(new ProbeScheduler(this, ProbeType.CHECK_DEAD)); probeDeadNodesSchedulerThr.setDaemon(true); probeDeadNodesSchedulerThr.start(); probeSuspectNodesSchedulerThr = - new Thread(new 
ProbeScheduler(this, ProbeType.CHECK_SUSPECT)); + new HadoopThread(new ProbeScheduler(this, ProbeType.CHECK_SUSPECT)); probeSuspectNodesSchedulerThr.setDaemon(true); probeSuspectNodesSchedulerThr.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/LocatedBlocksRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/LocatedBlocksRefresher.java index 454d1f9cd93e4..be8fd94247abb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/LocatedBlocksRefresher.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/LocatedBlocksRefresher.java @@ -99,7 +99,7 @@ public Thread newThread(Runnable r) { } @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { if (!waitForInterval()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java index a69ae329c39f0..33130fd535035 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java @@ -22,12 +22,12 @@ import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -110,9 +110,9 @@ private synchronized ClientDatanodeProtocol getDatanodeProxy( final boolean connectToDnViaHostname) throws IOException { if (proxy == null) { try { - proxy = ugi.doAs(new PrivilegedExceptionAction() { + proxy = ugi.callAs(new Callable() { @Override - public ClientDatanodeProtocol run() throws Exception { + public ClientDatanodeProtocol call() throws Exception { return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf, socketTimeout, connectToDnViaHostname); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java index 33f4934e5489d..6be57477976ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java @@ -42,7 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -144,7 +144,7 @@ public DatanodeAdminProperties[] call() throws Exception { } }); - Thread thread = new Thread(futureTask); + Thread thread = new HadoopThread(futureTask); thread.start(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 29d7fbed8963e..d757de1a6370e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -37,7 +37,6 @@ import java.net.URI; import java.net.URL; import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Base64; import java.util.Base64.Decoder; @@ -49,6 +48,7 @@ import java.util.Optional; import java.util.Set; import java.util.StringTokenizer; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import javax.ws.rs.core.MediaType; @@ -682,10 +682,10 @@ T run() throws IOException { try { // the entire lifecycle of the connection must be run inside the // doAs to ensure authentication is performed correctly - return connectUgi.doAs( - new PrivilegedExceptionAction() { + return connectUgi.callAs( + new Callable() { @Override - public T run() throws IOException { + public T call() throws IOException { return runWithRetry(); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java index cceb01b31cd16..6204f3ddc260c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java @@ -28,6 +28,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.event.Level; @@ -38,6 +39,7 @@ import java.net.URISyntaxException; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -46,6 +48,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Timeout.ThreadMode.SEPARATE_THREAD; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -344,6 +347,8 @@ private void testResolveDomainNameUsingDNS(boolean useFQDN) throws Exception { assertTrue(nn2Count.get() > 0, "nn2 should have been selected: " + nn2Count.get()); } + // FIXME Sometimes java.net.Inet4AddressImpl.lookupAllHostAddr() hangs. Why ? 
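// The SEPARATE_THREAD timeout added below guards against the hang noted in
// the FIXME: JUnit 5 runs the annotated test body in its own thread, so the
// test can be failed after one minute even if lookupAllHostAddr() never
// returns. A minimal self-contained sketch of that annotation:
import static org.junit.jupiter.api.Timeout.ThreadMode.SEPARATE_THREAD;

import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class SeparateThreadTimeoutSketch {
  @Test
  @Timeout(value = 1, unit = TimeUnit.MINUTES, threadMode = SEPARATE_THREAD)
  void failsAfterOneMinuteEvenIfBlocked() throws Exception {
    Thread.sleep(10L); // stand-in for a lookup that might block forever
  }
}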
+ @Timeout(value=1, unit = TimeUnit.MINUTES, threadMode = SEPARATE_THREAD) @Test public void testResolveDomainNameUsingDNS() throws Exception { // test resolving to IP diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 8c913377c01fd..87e8e93eacbbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -45,6 +45,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -677,9 +678,9 @@ public long[] answer(InvocationOnMock invocation) throws Throwable { assertEquals(1, stats[0]); assertEquals(1, counter.get()); - Thread t = new Thread() { + Thread t = new HadoopThread() { @Override - public void run() { + public void work() { try { // Fail over between calling delayProxy.getStats() and throw // exception. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java index c12b6831d45db..80d2859684fa5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java @@ -25,6 +25,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -226,7 +227,7 @@ static void waitForAll(List> furtures) throws Exception { } } - static class AllocatorThread extends Thread { + static class AllocatorThread extends HadoopThread { private final ByteArrayManager bam; private final int arrayLength; private byte[] array; @@ -237,7 +238,7 @@ static class AllocatorThread extends Thread { } @Override - public void run() { + public void work() { try { array = bam.newByteArray(arrayLength); } catch (InterruptedException e) { @@ -333,9 +334,9 @@ public void testByteArrayManager() throws Exception { } final List exceptions = new ArrayList(); - final Thread randomRecycler = new Thread() { + final Thread randomRecycler = new HadoopThread() { @Override - public void run() { + public void work() { LOG.info("randomRecycler start"); for(int i = 0; shouldRun(); i++) { final int j = ThreadLocalRandom.current().nextInt(runners.length); @@ -524,7 +525,7 @@ public void run() { Thread start(int n) { this.n = n; - final Thread t = new Thread(this); + final Thread t = new HadoopThread(this); t.start(); return t; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index df78938c20cc6..f9f5970208f57 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -96,10 +96,10 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; -import java.security.PrivilegedExceptionAction; import java.text.MessageFormat; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; @@ -362,10 +362,10 @@ private HttpURLConnection getConnection(final String method, } final URL url = HttpFSUtils.createURL(path, params, multiValuedParams); try { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction() { + return UserGroupInformation.getCurrentUser().callAs( + new Callable() { @Override - public HttpURLConnection run() throws Exception { + public HttpURLConnection call() throws Exception { return getConnection(url, method); } } @@ -1291,10 +1291,10 @@ public void readFields(DataInput in) throws IOException { public Token getDelegationToken(final String renewer) throws IOException { try { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction>() { + return UserGroupInformation.getCurrentUser().callAs( + new Callable>() { @Override - public Token run() throws Exception { + public Token call() throws Exception { return authURL.getDelegationToken(uri.toURL(), authToken, renewer); } @@ -1311,10 +1311,10 @@ public Token run() throws Exception { public long renewDelegationToken(final Token token) throws IOException { try { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction() { + return UserGroupInformation.getCurrentUser().callAs( + new Callable() { @Override - public Long run() throws Exception { + public Long call() throws Exception { return authURL.renewDelegationToken(uri.toURL(), authToken); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 7eb94783651a0..75fa55cbf2b56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -92,12 +92,12 @@ import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.AccessControlException; -import java.security.PrivilegedExceptionAction; import java.text.MessageFormat; import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; /** * Main class of HttpFSServer server. 
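// In the hunks above, HadoopThread subclasses override work() where they used
// to override Thread.run() — HadoopThread appears to treat work() as the hook
// it invokes from run(), presumably so common setup and cleanup can wrap it.
// A sketch assuming that contract:
import org.apache.hadoop.util.concurrent.HadoopThread;

public class WorkOverrideSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread t = new HadoopThread() {
      @Override
      public void work() {
        System.out.println("work() runs on " + Thread.currentThread().getName());
      }
    };
    t.start();
    t.join();
  }
}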
@@ -284,9 +284,9 @@ public Response get(@PathParam("path") String path, .createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser()); try { - is = ugi.doAs(new PrivilegedExceptionAction() { + is = ugi.callAs(new Callable() { @Override - public InputStream run() throws Exception { + public InputStream call() throws Exception { return command.execute(fs); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java index 4d289ad1ddd85..00d5cfe4f6143 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java @@ -20,16 +20,16 @@ import javax.security.auth.login.LoginContext; import org.apache.hadoop.security.authentication.util.KerberosUtil; +import org.apache.hadoop.util.SubjectUtil; import java.io.File; import java.security.Principal; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; /** * Test helper class for Java Kerberos setup. @@ -99,27 +99,34 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) { } } - public static T doAs(String principal, final Callable callable) - throws Exception { + @Deprecated + public static T doAs(String principal, final Callable callable) throws Exception { + return callAs(principal, callable); + } + + + public static T callAs(String principal, final Callable callable) throws Exception { LoginContext loginContext = null; try { - Set principals = new HashSet(); - principals.add( - new KerberosPrincipal(KerberosTestUtils.getClientPrincipal())); - Subject subject = new Subject(false, principals, new HashSet(), - new HashSet()); - loginContext = new LoginContext("", subject, null, - new KerberosConfiguration(principal)); + Set principals = new HashSet<>(); + principals.add(new KerberosPrincipal(KerberosTestUtils.getClientPrincipal())); + Subject subject = new Subject(false, principals, new HashSet<>(), new HashSet<>()); + loginContext = new LoginContext("", subject, null, new KerberosConfiguration(principal)); loginContext.login(); subject = loginContext.getSubject(); - return Subject.doAs(subject, new PrivilegedExceptionAction() { + return SubjectUtil.callAs(subject, new Callable() { @Override - public T run() throws Exception { + public T call() throws Exception { return callable.call(); } }); - } catch (PrivilegedActionException ex) { - throw ex.getException(); + } catch (CompletionException ex) { + Throwable cause = ex.getCause(); + if (cause instanceof Exception) { + throw (Exception) cause; + } else { + throw new RuntimeException(cause); + } } finally { if (loginContext != null) { loginContext.logout(); @@ -127,12 +134,22 @@ public T run() throws Exception { } } + public static T callAsClient(Callable callable) throws Exception { + return callAs(getClientPrincipal(), callable); + } + + @Deprecated public static T doAsClient(Callable callable) throws Exception { return doAs(getClientPrincipal(), callable); } + public static T callAsServer(Callable callable) throws Exception { + return callAs(getServerPrincipal(), callable); + } + + @Deprecated public static T doAsServer(Callable 
callable) throws Exception { return doAs(getServerPrincipal(), callable); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java index cbbcccf3ca0ba..d19e23d85d1cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java @@ -22,6 +22,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +48,7 @@ public AsyncDataService() { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new Thread(threadGroup, r); + return new HadoopThread(threadGroup, r); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java index dc1eb8746964e..1e8fbf5ce5aef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java @@ -20,11 +20,11 @@ import java.io.IOException; import java.net.URI; import java.nio.file.FileSystemException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map.Entry; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -237,9 +237,9 @@ public DFSClient load(final DfsClientKey key) throws Exception { key.userName, UserGroupInformation.getCurrentUser()); // Guava requires CacheLoader never returns null.
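The `KerberosTestUtils` rework above keeps deprecated `doAs*` shims delegating to the new `callAs*` methods, and replaces `PrivilegedActionException` handling with `CompletionException` handling. A sketch of the unwrapping idiom, assuming `SubjectUtil.callAs(Subject, Callable<T>)` mirrors the JDK 18+ `Subject.callAs` contract of surfacing the action's failure as the `CompletionException`'s cause:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;
import javax.security.auth.Subject;
import org.apache.hadoop.util.SubjectUtil;

public final class UnwrapSketch {
  static <T> T runAs(Subject subject, Callable<T> callable) throws Exception {
    try {
      return SubjectUtil.callAs(subject, callable);
    } catch (CompletionException e) {
      // callAs wraps the Callable's exception; rethrow the original
      // checked exception where possible, as the patch does above.
      Throwable cause = e.getCause();
      if (cause instanceof Exception) {
        throw (Exception) cause;
      }
      throw new RuntimeException(cause);
    }
  }
}
```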
- return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public DFSClient run() throws IOException { + public DFSClient call() throws IOException { URI namenodeURI = namenodeUriMap.get(key.namenodeId); if (namenodeURI == null) { throw new IOException("No namenode URI found for user:" + diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index 70ae4b29e9f96..591776e2090be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -246,7 +246,7 @@ void shouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { while (shouldRun) { scan(streamTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java index 793d2a756ef6e..ca6d68081d608 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java @@ -20,11 +20,11 @@ import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; -import java.security.PrivilegedAction; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -139,10 +139,10 @@ public Map getDatanodesSubcluster() { try { // We need to get the DNs as a privileged user UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); - Map dnMap = loginUser.doAs( - new PrivilegedAction>() { + Map dnMap = loginUser.callAsNoException( + new Callable>() { @Override - public Map run() { + public Map call() { try { Map result; if (rpcServer.isAsync()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java index 91af1ca06ac7e..3a9a17155416c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,7 +98,7 @@ private synchronized void updateSubclusterMapping() { if (subclusterMapping == null || (monotonicNow() - lastUpdated) > minUpdateTime) { // Fetch the mapping asynchronously - Thread updater = new Thread(new Runnable() { + Thread updater = new HadoopThread(new Runnable() { @Override 
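`LocalResolver` above shows the no-checked-exception variant: a `PrivilegedAction<T>` (whose `run()` declares nothing) maps to `callAsNoException(Callable<T>)` rather than to `callAs`. A sketch with the method name taken from the hunk and a placeholder body standing in for the datanode lookup:

```java
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Callable;
import org.apache.hadoop.security.UserGroupInformation;

public final class NoExceptionSketch {
  static Map<String, String> fetchAsLoginUser() throws IOException {
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    // Before: loginUser.doAs(new PrivilegedAction<Map<String, String>>() { ... })
    // After: callAsNoException declares no checked exceptions, so the
    // Callable body must handle its own, just as PrivilegedAction did.
    return loginUser.callAsNoException(new Callable<Map<String, String>>() {
      @Override
      public Map<String, String> call() {
        return Collections.emptyMap(); // placeholder body
      }
    });
  }
}
```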
public void run() { final MembershipStore membershipStore = getMembershipStore(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 2ffc5f0b5d893..414936f8d9729 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.eclipse.jetty.util.ajax.JSON; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -458,7 +459,7 @@ public void run() { /** * Thread that creates connections asynchronously. */ - static class ConnectionCreator extends Thread { + static class ConnectionCreator extends HadoopThread { /** If the creator is running. */ private boolean running = true; /** Queue to push work to. */ @@ -470,7 +471,7 @@ static class ConnectionCreator extends Thread { } @Override - public void run() { + public void work() { while (this.running) { try { ConnectionPool pool = this.queue.take(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index 40ff843fa1dfe..edf108bad8393 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -25,13 +25,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for updating mount table cache on all the router. */ -public class MountTableRefresherThread extends Thread { +public class MountTableRefresherThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(MountTableRefresherThread.class); private boolean success; @@ -61,7 +62,7 @@ public MountTableRefresherThread(MountTableManager manager, * update cache on R2 and R3. 
*/ @Override - public void run() { + public void work() { try { SecurityUtil.doAsLoginUser(() -> { if (UserGroupInformation.isSecurityEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 14cc47ffa1e6e..418e209a4fce5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -59,6 +59,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -391,9 +392,9 @@ protected void serviceStop() throws Exception { * Shutdown the router. */ public void shutDown() { - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { Router.this.stop(); } }.start(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java index 772e725788826..85289883f5b17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java @@ -37,8 +37,8 @@ import java.io.IOException; import java.io.InterruptedIOException; -import java.security.PrivilegedExceptionAction; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_FORCE_CLOSE_OPEN_FILE; @@ -114,7 +114,7 @@ public boolean routerFedRename(final String src, final String dst, try { // as router user with saveJournal and task submission privileges - return routerUser.doAs((PrivilegedExceptionAction) () -> { + return routerUser.callAs((Callable) () -> { // Build and submit router federation rename job. 
BalanceJob job = buildRouterRenameJob(srcLoc.getNameserviceId(), dstLoc.getNameserviceId(), srcLoc.getDest(), dstLoc.getDest()); @@ -154,7 +154,7 @@ private void checkPermission(RemoteLocation src, RemoteLocation dst) UserGroupInformation proxyUser = UserGroupInformation .createProxyUser(remoteUserName, UserGroupInformation.getLoginUser()); - proxyUser.doAs((PrivilegedExceptionAction) () -> { + proxyUser.callAs((Callable) () -> { checkRenamePermission(src, dst); return null; }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsckServlet.java index a439e5c0ce84e..5496bf62a1960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsckServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsckServlet.java @@ -21,8 +21,8 @@ import java.io.PrintWriter; import java.net.HttpURLConnection; import java.net.InetAddress; -import java.security.PrivilegedExceptionAction; import java.util.Map; +import java.util.concurrent.Callable; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; @@ -56,7 +56,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) final Configuration conf = RouterHttpServer.getConfFromContext(context); final UserGroupInformation ugi = getUGI(request, conf); try { - ugi.doAs((PrivilegedExceptionAction) () -> { + ugi.callAs((Callable) () -> { Router router = RouterHttpServer.getRouterFromContext(context); new RouterFsck(router, pmap, out, remoteAddress).fsck(); return null; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index 5607ab8109d26..fb3bd83a521a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,7 +64,7 @@ public RouterHeartbeatService(Router router) { * Trigger the update of the Router state asynchronously. 
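Where the old code used a lambda typed by a cast to `PrivilegedExceptionAction<T>`, the `RouterFederationRename` and `RouterFsckServlet` hunks above simply retarget the cast to `Callable<T>`. A sketch of the proxy-user permission check, with `checkRenamePermission` as a hypothetical stand-in for the patch's real check:

```java
import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.security.UserGroupInformation;

public final class ProxyUserSketch {
  static void checkAs(String remoteUserName)
      throws IOException, InterruptedException {
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
        remoteUserName, UserGroupInformation.getLoginUser());
    // The lambda is now cast to Callable<Void> instead of
    // PrivilegedExceptionAction<Void>; the body is unchanged.
    proxyUser.callAs((Callable<Void>) () -> {
      checkRenamePermission(); // hypothetical stand-in
      return null;
    });
  }

  private static void checkRenamePermission() throws IOException {
    // placeholder
  }
}
```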
*/ protected void updateStateAsync() { - Thread thread = new Thread(this::updateStateStore, "Router Heartbeat Async"); + Thread thread = new HadoopThread(this::updateStateStore, "Router Heartbeat Async"); thread.setDaemon(true); thread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 77bebab4ade71..fd1059d168a8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -213,6 +213,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.checkerframework.checker.nullness.qual.NonNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -2507,7 +2508,7 @@ private static class AsyncThreadFactory implements ThreadFactory { @Override public Thread newThread(@NonNull Runnable r) { - Thread thread = new Thread(r, namePrefix + threadNumber.getAndIncrement()); + Thread thread = new HadoopThread(r, namePrefix + threadNumber.getAndIncrement()); thread.setDaemon(true); return thread; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 3650ce60f7221..d12d9dfcc9c19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -133,7 +134,7 @@ public void testConcurrentRefreshRequests() throws InterruptedException { // Spawn 100 concurrent refresh requests Thread[] threads = new Thread[100]; for (int i = 0; i < 100; i++) { - threads[i] = new Thread(() -> + threads[i] = new HadoopThread(() -> client.refreshFairnessPolicyController(routerContext.getConf())); } @@ -182,7 +183,7 @@ public void testRefreshStaticChangeHandlers() throws Exception { final int newNs1Permits = 4; conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns0", newNs0Permits); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns1", newNs1Permits); - Thread threadRefreshController = new Thread(() -> client. + Thread threadRefreshController = new HadoopThread(() -> client. 
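Executor-owned threads migrate the same way: the `ThreadFactory` constructs `HadoopThread` instead of `Thread`, using the familiar `(Runnable)`, `(Runnable, String)`, `(ThreadGroup, Runnable)` and `(String)` constructors the hunks rely on. A sketch in the shape of `RouterRpcServer`'s `AsyncThreadFactory` above:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.util.concurrent.HadoopThread;

public final class FactorySketch {
  public static void main(String[] args) {
    final AtomicInteger threadNumber = new AtomicInteger(1);
    ThreadFactory factory = r -> {
      // Same shape as AsyncThreadFactory: named, daemon HadoopThreads.
      Thread t = new HadoopThread(r,
          "async-worker-" + threadNumber.getAndIncrement());
      t.setDaemon(true);
      return t;
    };
    ExecutorService pool = Executors.newFixedThreadPool(2, factory);
    pool.execute(() -> System.out.println(Thread.currentThread().getName()));
    pool.shutdown();
  }
}
```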
refreshFairnessPolicyController(routerContext.getConf())); threadRefreshController.start(); threadRefreshController.join(); @@ -218,7 +219,7 @@ private List makeDummyInvocations(RouterRpcClient client, final int nThr RemoteMethod dummyMethod = Mockito.mock(RemoteMethod.class); List threadAcquirePermits = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { - Thread threadAcquirePermit = new Thread(() -> { + Thread threadAcquirePermit = new HadoopThread(() -> { try { client.invokeSingle(namespace, dummyMethod); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index b43c87591d76c..f7345c525e15b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -318,7 +319,7 @@ public void testCounter() throws Exception { int expectedSchedulerCount = rpcServer.getSchedulerJobCount() + 1; AtomicInteger maxSchedulerCount = new AtomicInteger(); AtomicBoolean watch = new AtomicBoolean(true); - Thread watcher = new Thread(() -> { + Thread watcher = new HadoopThread(() -> { while (watch.get()) { int schedulerCount = rpcServer.getSchedulerJobCount(); if (schedulerCount > maxSchedulerCount.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java index 373743299f032..3857b8ebbfacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -324,7 +324,7 @@ protected MountTableRefresherThread getLocalRefresher( String adminAddress) { return new MountTableRefresherThread(null, adminAddress) { @Override - public void run() { + public void work() { try { // Sleep 1 minute Thread.sleep(60000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d1f02c47e90b8..13e6364774fdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -130,7 +130,7 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; @@ -3960,7 +3960,7 @@ public void processMisReplicatedBlocks() { reconstructionQueuesInitializer = new Daemon() { @Override - public void run() { + public void work() { try { processMisReplicatesAsync(); } catch (InterruptedException ie) { @@ -5641,7 +5641,7 @@ public int getBlockOpQueueLength() { return blockReportThread.queue.size(); } - private class BlockReportProcessingThread extends Thread { + private class BlockReportProcessingThread extends HadoopThread { private long lastFull = 0; private final BlockingQueue queue; @@ -5653,7 +5653,7 @@ private class BlockReportProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 83c179bfe653f..496e2a2f4d933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +66,7 @@ * starts up, and at configurable intervals afterwards. */ @InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationMonitor extends Thread implements Closeable { +public class CacheReplicationMonitor extends HadoopThread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(CacheReplicationMonitor.class); @@ -159,7 +160,7 @@ public CacheReplicationMonitor(FSNamesystem namesystem, } @Override - public void run() { + public void work() { long startTimeMs = 0; Thread.currentThread().setName("CacheReplicationMonitor(" + System.identityHashCode(this) + ")"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 798b5fb5966f7..c8d793b745049 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -152,7 +153,7 @@ public void checkAndUpdateReportIfNecessary() { public void updateSlowDiskReportAsync(long now) { if (isUpdateInProgress.compareAndSet(false, true)) { lastUpdateTime = now; - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java 
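Long-lived daemons that used to extend `Thread` now extend `HadoopThread` and move their loop from `run()` into `work()`, as `BlockReportProcessingThread` and `CacheReplicationMonitor` do above. A self-contained sketch of that queue-draining shape, with hypothetical `submit`/`processQueue` helpers:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import org.apache.hadoop.util.concurrent.HadoopThread;

class QueueWorkerSketch extends HadoopThread {
  private final BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(1024);

  @Override
  public void work() {               // previously: public void run()
    try {
      processQueue();
    } catch (Throwable t) {
      // mirror the patch's style: surface fatal errors, don't swallow them
      throw new RuntimeException(t);
    }
  }

  private void processQueue() throws InterruptedException {
    while (!isInterrupted()) {
      queue.take().run();
    }
  }

  boolean submit(Runnable action) {
    return queue.offer(action);
  }
}
```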
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 93303bcf807de..c3269b923fb1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -53,7 +53,7 @@ import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -849,8 +849,8 @@ private void deleteAsync(File curDir) throws IOException { deleteDir(curTmp); } rename(curDir, curTmp); - new Thread("Async Delete Current.tmp") { - public void run() { + new HadoopThread("Async Delete Current.tmp") { + public void work() { try { deleteDir(curTmp); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 45eeac85d6b36..31acda1f703a6 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -75,6 +75,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.apache.hadoop.classification.VisibleForTesting; @@ -599,7 +600,7 @@ void start() { //Thread is started already return; } - bpThread = new Thread(this); + bpThread = new HadoopThread(this); bpThread.setDaemon(true); // needed for JUnit testing if (lifelineSender != null) { @@ -1078,7 +1079,7 @@ public void run() { } public void start() { - lifelineThread = new Thread(this, + lifelineThread = new HadoopThread(this, formatThreadName("lifeline", lifelineNnAddr)); lifelineThread.setDaemon(true); lifelineThread.setUncaughtExceptionHandler( @@ -1384,7 +1385,7 @@ public long monotonicNow() { /** * CommandProcessingThread that process commands asynchronously. 
*/ - class CommandProcessingThread extends Thread { + class CommandProcessingThread extends HadoopThread { private final BPServiceActor actor; private final BlockingQueue queue; @@ -1396,7 +1397,7 @@ class CommandProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index 1611c3c9ce532..af01ce8fbe489 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -19,8 +19,8 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; import java.util.*; +import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.classification.InterfaceAudience; @@ -122,10 +122,10 @@ void shutDownAll(List bposList) throws InterruptedException { synchronized void startAll() throws IOException { try { - UserGroupInformation.getLoginUser().doAs( - new PrivilegedExceptionAction() { + UserGroupInformation.getLoginUser().callAs( + new Callable() { @Override - public Object run() throws Exception { + public Object call() throws Exception { for (BPOfferService bpos : offerServices) { bpos.start(); } @@ -275,9 +275,9 @@ private void doRefreshNamenodes( } try { UserGroupInformation.getLoginUser() - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public Object run() throws Exception { + public Object call() throws Exception { bpos.refreshNNList(nsToRefresh, nnIds, addrs, lifelineAddrs); return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index ef778791cfd9c..6715e1d5bcf3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -124,7 +124,6 @@ import java.net.Socket; import java.net.UnknownHostException; import java.nio.channels.ServerSocketChannel; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -262,6 +261,7 @@ import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Tracer; import org.eclipse.jetty.util.ajax.JSON; @@ -2381,9 +2381,9 @@ public static InterDatanodeProtocol createInterDataNodeProtocolProxy( final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser(); try { return loginUgi - .doAs(new PrivilegedExceptionAction() { + .callAs(new Callable() { @Override - public InterDatanodeProtocol run() throws IOException { + public InterDatanodeProtocol call() throws IOException { return new InterDatanodeProtocolTranslatorPB(addr, loginUgi, conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout); } @@ -3855,8 +3855,8 @@ public synchronized void shutdownDatanode(boolean 
forUpgrade) throws IOException // Asynchronously start the shutdown process so that the rpc response can be // sent back. - Thread shutdownThread = new Thread("Async datanode shutdown thread") { - @Override public void run() { + Thread shutdownThread = new HadoopThread("Async datanode shutdown thread") { + @Override public void work() { if (!shutdownForUpgrade) { // Delay the shutdown a bit if not doing for restart. try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index d8f1e23ec379b..35230be5aebc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +53,7 @@ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. *
They are all managed by the DataNode's BlockScanner. */ -public class VolumeScanner extends Thread { +public class VolumeScanner extends HadoopThread { public static final Logger LOG = LoggerFactory.getLogger(VolumeScanner.class); @@ -633,7 +634,7 @@ private synchronized ExtendedBlock popNextSuspectBlock() { } @Override - public void run() { + public void work() { // Record the minute on which the scanner started. this.startMinute = TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index e5b23bb60e516..2aa5319c6e8f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -109,7 +110,7 @@ public Thread newThread(Runnable r) { synchronized (this) { thisIndex = counter++; } - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName("Async disk worker #" + thisIndex + " for volume " + volume); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index 262a24bd3aa45..a9ecdd46bcb8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -49,6 +49,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; class FsVolumeList { private final CopyOnWriteArrayList volumes = @@ -260,8 +261,8 @@ void getAllVolumesMap(final String bpid, new ConcurrentHashMap(); List replicaAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new HadoopThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid + " on volume " + v + "..."); @@ -507,8 +508,8 @@ void addBlockPool(final String bpid, final Configuration conf) throws IOExceptio new ConcurrentHashMap(); List blockPoolAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new HadoopThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Scanning block pool " + bpid + " on volume " + v + "..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java index 0d42ae99e358e..e295db58d67b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -82,7 +83,7 @@ private void addExecutorForVolume(final String storageId) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(threadGroup, r); + Thread t = new HadoopThread(threadGroup, r); t.setName("Async RamDisk lazy persist worker " + " for volume with id " + storageId); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java index 8de736ac0e237..86e059d100ad9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java @@ -59,8 +59,8 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; import java.util.EnumSet; +import java.util.concurrent.Callable; import static io.netty.handler.codec.http.HttpHeaderNames.ACCEPT; import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS; @@ -130,9 +130,9 @@ public void channelRead0(final ChannelHandlerContext ctx, path = params.path(); injectToken(); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { try { handle(ctx, req); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index 29b262598bf55..fb63a13fb33b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -128,7 +128,7 @@ void shutdown() { // The main work loop // @Override - public void run() { + public void work() { // How often to check the size of the edit log (min of checkpointCheckPeriod and checkpointPeriod) long periodMSec = checkpointConf.getCheckPeriod() * 1000; // How often to checkpoint regardless of number of txns diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 115e9485fa0a9..8106334307630 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -78,7 +79,7 @@ private boolean isSyncThreadAlive() { private void startSyncThread() { synchronized(syncThreadLock) { if (!isSyncThreadAlive()) { - syncThread = new Thread(this, this.getClass().getSimpleName()); + syncThread = new HadoopThread(this, this.getClass().getSimpleName()); syncThread.start(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index fa321fe85e57b..40ff0829730f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -76,7 +76,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Preconditions; @@ -1247,7 +1247,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { StorageDirectory sd = it.next(); FSImageSaver saver = new FSImageSaver(ctx, sd, nnf); - Thread saveThread = new Thread(saver, saver.toString()); + Thread saveThread = new HadoopThread(saver, saver.toString()); saveThreads.add(saveThread); saveThread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index edacb7eaafd00..d69ca71759f2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -75,6 +75,7 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -184,7 +185,7 @@ public LoaderContext getLoaderContext() { * Thread to compute the MD5 of a file as this can be in parallel while * loading the image without interfering much. */ - private static class DigestThread extends Thread { + private static class DigestThread extends HadoopThread { /** * Exception thrown when computing the digest if it cannot be calculated. 
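`DigestThread` (converted just below) follows the patch's convention for one-shot background work: the thread stores its result and any `IOException` in fields, and the caller reads them after `join()`. A simplified sketch with a hypothetical file-size computation in place of the MD5 digest:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.util.concurrent.HadoopThread;

class SizeThread extends HadoopThread {
  private final Path file;
  private volatile long size = -1;          // result, valid after join()
  private volatile IOException exception;   // failure, if any

  SizeThread(Path file) {
    this.file = file;
  }

  @Override
  public void work() {                      // previously: public void run()
    try {
      size = Files.size(file);
    } catch (IOException e) {
      exception = e;
    }
  }

  long getSize() { return size; }
  IOException getException() { return exception; }
}
```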
@@ -219,7 +220,7 @@ public IOException getException() { } @Override - public void run() { + public void work() { try { digest = MD5FileUtils.computeMd5ForFile(file); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java index 059b6531242e8..463f3f1bf0fea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.io.PrintWriter; import java.net.InetAddress; -import java.security.PrivilegedExceptionAction; import java.util.Map; +import java.util.concurrent.Callable; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; @@ -55,7 +55,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response final UserGroupInformation ugi = getUGI(request, conf); try { - ugi.doAs((PrivilegedExceptionAction) () -> { + ugi.callAs((Callable) () -> { NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); final FSNamesystem namesystem = nn.getNamesystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java index 442c1aba95b1c..f8bd344e80a86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap; import org.apache.hadoop.hdfs.server.common.Util; @@ -28,7 +29,6 @@ import static org.apache.hadoop.util.Time.monotonicNow; import java.net.HttpURLConnection; -import java.security.PrivilegedExceptionAction; import java.util.*; import java.io.*; @@ -140,9 +140,9 @@ public void doGet(final HttpServletRequest request, validateRequest(context, conf, request, response, nnImage, parsedParams.getStorageInfoString()); - UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction() { + UserGroupInformation.getCurrentUser().callAs(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { if (parsedParams.isGetImage()) { long txid = parsedParams.getTxId(); File imageFile = null; @@ -565,11 +565,11 @@ protected void doPut(final HttpServletRequest request, validateRequest(context, conf, request, response, nnImage, parsedParams.getStorageInfoString()); - UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction() { + UserGroupInformation.getCurrentUser().callAs( + new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { // if its not the active NN, then we need to notify the caller it was was the wrong // target (regardless of the fact that we got the image) HAServiceProtocol.HAServiceState state = NameNodeHttpServer diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index d48941203d3f0..47bb47466d6bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -103,7 +103,7 @@ import org.apache.hadoop.util.GcTimeMonitor.Builder; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.util.Timer; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1076,7 +1076,7 @@ public FileSystem run() throws IOException { return dfs; } }); - this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); + this.emptier = new HadoopThread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index bda6d25d9945d..daa71ccc74909 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -25,9 +25,8 @@ import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; -import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; import java.util.*; +import java.util.concurrent.Callable; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -319,14 +318,18 @@ public void shutdown() { @Override public void run() { - SecurityUtil.doAsLoginUserOrFatal( - new PrivilegedAction() { - @Override - public Object run() { - doWork(); - return null; - } - }); + try { + SecurityUtil.callAsLoginUserOrFatalNoException( + new Callable() { + @Override + public Object call() { + doWork(); + return null; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } } // // The main work loop @@ -407,11 +410,11 @@ static boolean downloadCheckpointFiles( } try { - Boolean b = UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction() { + Boolean b = UserGroupInformation.getCurrentUser().callAs( + new Callable() { @Override - public Boolean run() throws Exception { + public Boolean call() throws Exception { dstImage.getStorage().cTime = sig.cTime; // get fsimage diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index abbc2f24e4f5f..5301a5a9e9739 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -119,9 +120,9 @@ public int run(String[] args) throws Exception { SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName()); - return 
SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction() { + return SecurityUtil.callAsLoginUserOrFatalNoException(new Callable() { @Override - public Integer run() { + public Integer call() { try { return doRun(); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index cf416307f47d4..af6cce888a07c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -20,8 +20,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.Collections; import java.util.Iterator; @@ -37,6 +35,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -300,9 +299,9 @@ public void catchupDuringFailover() throws IOException { // Important to do tailing as the login user, in case the shared // edits storage is implemented by a JournalManager that depends // on security credentials to access the logs (eg QuorumJournalManager). - SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction() { + SecurityUtil.callAsLoginUser(new Callable() { @Override - public Void run() throws Exception { + public Void call() throws Exception { long editsTailed = 0; // Fully tail the journal to the end do { @@ -475,7 +474,7 @@ void sleep(long sleepTimeMillis) throws InterruptedException { * The thread which does the actual work of tailing edits journals and * applying the transactions to the FSNS. 
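The `SecurityUtil` helpers migrate in the same two flavors seen in the `SecondaryNameNode`, `BootstrapStandby`, and `EditLogTailer` hunks: `doAsLoginUser(PrivilegedExceptionAction)` becomes `callAsLoginUser(Callable)`, and `doAsLoginUserOrFatal(PrivilegedAction)` becomes `callAsLoginUserOrFatalNoException(Callable)`. A sketch of both, with signatures assumed from the hunks:

```java
import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.security.SecurityUtil;

public final class LoginUserSketch {
  static void tailEdits() throws IOException {
    // Checked-exception flavor: tailing runs as the login user so the
    // shared edits storage (e.g. QuorumJournalManager) can authenticate.
    SecurityUtil.callAsLoginUser(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // ... tail the journal ...
        return null;
      }
    });
  }

  static void workLoop() {
    // "OrFatalNoException" flavor: failures are fatal to the process,
    // so the Callable declares no checked exceptions.
    SecurityUtil.callAsLoginUserOrFatalNoException(new Callable<Object>() {
      @Override
      public Object call() {
        // ... doWork() ...
        return null;
      }
    });
  }
}
```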
*/ - private class EditLogTailerThread extends Thread { + private class EditLogTailerThread extends HadoopThread { private volatile boolean shouldRun = true; private EditLogTailerThread() { @@ -487,11 +486,11 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { - SecurityUtil.doAsLoginUserOrFatal( - new PrivilegedAction() { + public void work() { + SecurityUtil.callAsLoginUserOrFatalNoException( + new Callable() { @Override - public Object run() { + public Object call() { doWork(); return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 8426bbe33023a..f0cfb665ab0c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.net.URI; import java.net.URL; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -50,6 +49,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; @@ -386,7 +386,7 @@ private long countUncheckpointedTxns() { img.getStorage().getMostRecentCheckpointTxId(); } - private class CheckpointerThread extends Thread { + private class CheckpointerThread extends HadoopThread { private volatile boolean shouldRun = true; private volatile long preventCheckpointsUntil = 0; @@ -399,13 +399,13 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { // We have to make sure we're logged in as far as JAAS // is concerned, in order to use kerberized SSL properly. 
- SecurityUtil.doAsLoginUserOrFatal( - new PrivilegedAction() { + SecurityUtil.callAsLoginUserOrFatalNoException( + new Callable() { @Override - public Object run() { + public Object call() { doWork(); return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 443c1836351ed..e3b36cb2b85fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -28,7 +28,6 @@ import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.security.Principal; -import java.security.PrivilegedExceptionAction; import java.util.Base64; import java.util.Base64.Encoder; import java.util.Collection; @@ -37,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import javax.servlet.ServletContext; @@ -208,14 +208,14 @@ protected ServletContext getContext() { return context; } - private T doAs(final UserGroupInformation ugi, - final PrivilegedExceptionAction action) + private T callAs(final UserGroupInformation ugi, + final Callable action) throws IOException, InterruptedException { - return useIpcCallq ? doAsExternalCall(ugi, action) : ugi.doAs(action); + return useIpcCallq ? callAsExternalCall(ugi, action) : ugi.callAs(action); } - private T doAsExternalCall(final UserGroupInformation ugi, - final PrivilegedExceptionAction action) + private T callAsExternalCall(final UserGroupInformation ugi, + final Callable action) throws IOException, InterruptedException { // set the remote address, if coming in via a trust proxy server then // the address with be that of the proxied client @@ -731,9 +731,9 @@ public Response put( createFlagParam, noredirect, policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType); - return doAs(ugi, new PrivilegedExceptionAction() { + return callAs(ugi, new Callable() { @Override - public Response run() throws IOException, URISyntaxException { + public Response call() throws IOException, URISyntaxException { return put(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, destination, owner, group, permission, unmaskedPermission, overwrite, bufferSize, @@ -1071,9 +1071,9 @@ public Response post( init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize, excludeDatanodes, newLength); - return doAs(ugi, new PrivilegedExceptionAction() { + return callAs(ugi, new Callable() { @Override - public Response run() throws IOException, URISyntaxException { + public Response call() throws IOException, URISyntaxException { return post(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, concatSrcs, bufferSize, excludeDatanodes, newLength, noredirect); @@ -1321,7 +1321,7 @@ public Response get( renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction, snapshotName, oldSnapshotName, tokenKind, tokenService, startAfter, allUsers); - return doAs(ugi, () -> get(ugi, delegation, username, doAsUser, path.getAbsolutePath(), + return callAs(ugi, () -> get(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, offset, length, renewer, bufferSize, xattrNames, 
xattrEncoding, excludeDatanodes, fsAction, snapshotName, oldSnapshotName, snapshotDiffStartPath, snapshotDiffIndex, @@ -1717,9 +1717,9 @@ public void write(final OutputStream outstream) throws IOException { try { // restore remote user's ugi - ugi.doAs(new PrivilegedExceptionAction<Void>() { + ugi.callAs(new Callable<Void>() { @Override - public Void run() throws IOException { + public Void call() throws IOException { long n = 0; for (DirectoryListing dirList = firstDirList; ; dirList = getDirectoryListing(cp, p, dirList.getLastName()) @@ -1834,7 +1834,7 @@ public Response delete( final UriFsPathParam path = new UriFsPathParam(uriInfo.getPath()); init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName); - return doAs(ugi, () -> delete(ugi, delegation, username, doAsUser, + return callAs(ugi, () -> delete(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, recursive, snapshotName)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index e279ea349b235..f669bdb06bd8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -27,7 +27,7 @@ import java.net.URLConnection; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.InterfaceAudience; @@ -169,10 +169,10 @@ public int run(final String[] args) throws IOException { } try { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction<Integer>() { + return UserGroupInformation.getCurrentUser().callAs( + new Callable<Integer>() { @Override - public Integer run() throws Exception { + public Integer call() throws Exception { return doWork(args); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index c2bf9214ca781..72e62a54b8ac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -20,9 +20,9 @@ import java.io.IOException; import java.io.PrintStream; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.Date; +import java.util.concurrent.Callable; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; @@ -132,9 +132,9 @@ public static void main(Configuration conf, final String[] args) final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]); // Login the current user - UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Object>() { + UserGroupInformation.getCurrentUser().callAs(new Callable<Object>() { @Override - public Object run() throws Exception { + public Object call() throws Exception { if (print) { printTokens(conf, tokenFile, verbose); } else if (cancel) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java index aaa1038fa7f41..6305d0eaccb3c 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java @@ -21,12 +21,12 @@ import java.io.PrintStream; import java.net.InetSocketAddress; import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -341,10 +341,10 @@ private int doWork(String[] args) { @Override public int run(final String[] args) throws Exception { try { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedExceptionAction<Integer>() { + return UserGroupInformation.getCurrentUser().callAs( + new Callable<Integer>() { @Override - public Integer run() throws Exception { + public Integer call() throws Exception { return doWork(args); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java index 05e75c92c5a6d..4ffc2a181c85c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java @@ -27,7 +27,8 @@ import java.io.Writer; import java.net.URI; import java.security.NoSuchAlgorithmException; -import java.security.PrivilegedAction; +import java.util.concurrent.Callable; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1642,9 +1643,9 @@ private boolean doUserOp(UserGroupInformation ugi, final UserOp op) { UserGroupInformation.setLoginUser(ugi); // Create a test key - return ugi.doAs(new PrivilegedAction<Boolean>() { + return ugi.callAsNoException(new Callable<Boolean>() { @Override - public Boolean run() { + public Boolean call() { try { op.execute(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index f277b1a37b8d6..7e0e3ac643c2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -866,7 +867,7 @@ public Boolean get() { dataNodes[0].shutdown(); // Shutdown the second datanode when the pipeline is closing.
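Reviewer note: the hunks above (EditLogTailer through TestAclsEndToEnd) are one mechanical rewrite applied repeatedly. A `java.security.PrivilegedAction`/`PrivilegedExceptionAction` anonymous class becomes a `java.util.concurrent.Callable`, `run()` becomes `call()`, and the `doAs`-family entry point is swapped for a `callAs` counterpart: `ugi.callAs` where checked exceptions can propagate, and `ugi.callAsNoException`/`SecurityUtil.callAsLoginUserOrFatalNoException` where the old code used the unchecked `PrivilegedAction`. As a minimal sketch of the caller-side pattern — the `callAs` signature is inferred from the call sites in this patch, and `ExampleTool`/`doWork` are invented for illustration — the DFSck/GetConf-style migration looks like:

```java
import java.util.concurrent.Callable;

import org.apache.hadoop.security.UserGroupInformation;

public class ExampleTool {
  public int run(final String[] args) throws Exception {
    // Before this patch, the body below was a
    // PrivilegedExceptionAction<Integer> whose run() returned
    // doWork(args), passed to getCurrentUser().doAs(...).
    return UserGroupInformation.getCurrentUser().callAs(
        new Callable<Integer>() {
          @Override
          public Integer call() throws Exception {
            return doWork(args);
          }
        });
  }

  private int doWork(String[] args) {
    return 0; // stand-in body; the real tools do their work here
  }
}
```

The `NoException` variants show up exactly where the old code used `PrivilegedAction`, whose `run()` could not throw checked exceptions, so those call sites migrate without gaining try/catch blocks.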
- new Thread(() -> { + new HadoopThread(() -> { try { GenericTestUtils.waitFor(new Supplier<Boolean>() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 8eb2f588228f0..8027487f1a639 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -90,6 +90,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -729,7 +730,7 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in Counter counter = new Counter(0); for (int i = 0; i < threads; ++i ) { DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter); - readers[i] = new Thread(reader); + readers[i] = new HadoopThread(reader); readers[i].start(); } @@ -1018,7 +1019,7 @@ public static void namenodeRestartTest(final Configuration conf, assertFalse(HdfsUtils.isHealthy(uri)); //namenode is down, continue writing file4 in a thread - final Thread file4thread = new Thread(new Runnable() { + final Thread file4thread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1037,7 +1038,7 @@ public void run() { file4thread.start(); //namenode is down, read the file in a thread - final Thread reader = new Thread(new Runnable() { + final Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1057,7 +1058,7 @@ public void run() { //namenode is down, create another file in a thread final Path file3 = new Path(dir, "file"); - final Thread thread = new Thread(new Runnable() { + final Thread thread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1072,7 +1073,7 @@ public void run() { thread.start(); //restart namenode in a new thread - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1125,7 +1126,7 @@ public void run() { assertFalse(HdfsUtils.isHealthy(uri)); //leave safe mode in a new thread - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1306,7 +1307,7 @@ public void delayWhenRenewLeaseTimeout() { out1.write(new byte[256]); - Thread closeThread = new Thread(new Runnable() { + Thread closeThread = new HadoopThread(new Runnable() { @Override public void run() { try { //1.
trigger get LeaseRenewer lock diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java index d52b53d543206..b4b40197c278d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java @@ -60,6 +60,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -339,7 +340,7 @@ public void testCongestionAckDelay() { AtomicBoolean isDelay = new AtomicBoolean(true); // ResponseProcessor needs the dataQueue for the next step. - new Thread(() -> { + new HadoopThread(() -> { for (int i = 0; i < 10; i++) { // In order to ensure that other threads run for a period of time to prevent affecting // the results. @@ -376,7 +377,7 @@ public void testCongestionAckDelay() { // The purpose of adding packets to the dataQueue is to make the DataStreamer run // normally and judge whether to enter the sleep state according to the congestion. - new Thread(() -> { + new HadoopThread(() -> { for (int i = 0; i < 100; i++) { packet[i] = mock(DFSPacket.class); dataQueue.add(packet[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index df3fc4f8c4370..dfe072b41efca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -456,7 +457,7 @@ public void sync() { } private void startWaitForDeadNodeThread(DFSClient dfsClient, int size) { - new Thread(() -> { + new HadoopThread(() -> { DeadNodeDetector deadNodeDetector = dfsClient.getClientContext().getDeadNodeDetector(); while (deadNodeDetector.clearAndGetDetectedDeadNodes().size() != size) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 63f8dc226980b..cc93bd2b3bf88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -86,6 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -864,7 +865,7 @@ public void testDecommissionWithOpenfileReporting() closedFileSet, openFilesMap, maxDnOccurance); final AtomicBoolean stopRedundancyMonitor = new AtomicBoolean(false); - Thread monitorThread = new Thread(new Runnable() 
{ + Thread monitorThread = new HadoopThread(new Runnable() { @Override public void run() { while (!stopRedundancyMonitor.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index 16e1ea25b4b7a..303fb164c5d5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; @@ -995,7 +996,7 @@ public void testDecommissionWithMissingBlock() throws Exception { // Handle decommission nodes in a new thread. // Verify that nodes are decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - new Thread( + new HadoopThread( () -> { try { decomStarted.countDown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index a8d3c52fc6fef..77b640a532b85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -221,7 +222,7 @@ public void testImmediateReadOfNewFile() final AtomicReference errorMessage = new AtomicReference(); final FSDataOutputStream out = fileSystem.create(file); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -241,7 +242,7 @@ public void run() { } }); - Thread opener = new Thread(new Runnable() { + Thread opener = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -346,7 +347,7 @@ private void runTestUnfinishedBlockCRCError( final AtomicBoolean writerStarted = new AtomicBoolean(false); final AtomicBoolean error = new AtomicBoolean(false); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -379,7 +380,7 @@ public void run() { } } }); - Thread tailer = new Thread(new Runnable() { + Thread tailer = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java index 8f5ba9018dfa7..cfda31d91d1bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java @@ -34,6 +34,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.log4j.Level; import org.junit.jupiter.api.Assertions; @@ -159,7 +160,7 @@ public void testInterruptReader() throws Exception { final FSDataInputStream in = fs.open(file); AtomicBoolean readInterrupted = new AtomicBoolean(false); - final Thread reader = new Thread(new Runnable() { + final Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index 810f7e1864d17..fb131fe780acf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -73,6 +73,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -235,7 +236,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } Thread.sleep(500); @@ -334,7 +335,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } gotFailureLatch.await(); @@ -640,7 +641,7 @@ public void run() { } } }; - Thread thread = new Thread(readerRunnable); + Thread thread = new HadoopThread(readerRunnable); thread.start(); // While the thread is reading, send it interrupts. 
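Reviewer note: from here on, most of the churn is the second pattern in this patch — every bare `new Thread(...)` becomes `new HadoopThread(...)`, and thread subclasses override `work()` instead of `run()`. This section never shows `HadoopThread` itself, so the following is only a plausible reconstruction of the contract its call sites rely on: Thread-compatible constructors plus a `run()` that delegates to an overridable `work()` hook. What the real `run()` wraps around `work()` is an assumption here, not something visible in the diff:

```java
package org.apache.hadoop.util.concurrent;

/**
 * Hypothetical sketch of HadoopThread, reconstructed from its call
 * sites in this patch; the real class may differ in detail.
 */
public class HadoopThread extends Thread {

  public HadoopThread() {
  }

  // Matches call sites like new HadoopThread(runnable).
  public HadoopThread(Runnable target) {
    super(target);
  }

  // Matches call sites like new HadoopThread(runnable, name).
  public HadoopThread(Runnable target, String name) {
    super(target, name);
  }

  @Override
  public final void run() {
    // Single choke point: whatever common behavior Hadoop wants around
    // every thread body (logging, uncaught-error handling, context
    // propagation) would go here -- assumed, not shown in this section.
    work();
  }

  /** Subclasses override this instead of run(). */
  public void work() {
    super.run(); // executes the Runnable passed to the constructor, if any
  }
}
```

Making `run()` final and routing subclasses through `work()` would explain why the hunks rename `run()` to `work()` only when a class extends `HadoopThread`, while plain `new HadoopThread(runnable)` call sites keep their `Runnable` bodies untouched.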
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java index 230945d09631c..a81db9bc6f62a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java @@ -34,7 +34,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.VersionInfo; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.Test; import java.lang.management.ManagementFactory; @@ -105,7 +105,7 @@ private long addOneDataNode(Configuration conf) throws Exception { } private Thread newBalancerService(Configuration conf, String[] args) { - return new Thread(new Runnable() { + return new HadoopThread(new Runnable() { @Override public void run() { Tool cli = new Balancer.Cli(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index b6f27ca0c2329..8d6fc050a5dc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; @@ -1522,7 +1523,7 @@ public void testAsyncIBR() throws Exception { Thread[] writers = new Thread[numWriters]; for (int i=0; i < writers.length; i++) { final Path p = new Path("/writer"+i); - writers[i] = new Thread(new Runnable() { + writers[i] = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index da89b85f6de78..9ef31ccc5863f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -87,6 +87,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -337,7 +338,7 @@ public void blockUtilSendFullBlockReport() { }); countBlockReportItems(FAKE_BLOCK, mockNN1, blocks); - addNewBlockThread = new Thread(() -> { + addNewBlockThread = new HadoopThread(() -> { for (int i = 0; i < totalTestBlocks; i++) { SimulatedFSDataset fsDataset = (SimulatedFSDataset) mockFSDataset; SimulatedStorage simulatedStorage = fsDataset.getStorages().get(0); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 570d41a69dba4..2afc98e8c36c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -96,6 +96,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -917,7 +918,7 @@ private void testStopWorker(final TestStopWorkerRunnable tswr) final RecoveringBlock recoveringBlock = Iterators.get(recoveringBlocks.iterator(), 0); final ExtendedBlock block = recoveringBlock.getBlock(); - Thread slowWriterThread = new Thread(new Runnable() { + Thread slowWriterThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -944,7 +945,7 @@ public void run() { progressParent.uninterruptiblyAcquire(60000); // Start a worker thread which will attempt to stop the writer. - Thread stopWriterThread = new Thread(new Runnable() { + Thread stopWriterThread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 74d18b55c6cdc..1809df121c065 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.test.TestName; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -248,7 +249,7 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() final DataNode dataNode = cluster.getDataNodes().get(0); final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); - Thread recoveryThread = new Thread(() -> { + Thread recoveryThread = new HadoopThread(() -> { try { DatanodeInfo[] locations = block.getLocations(); final BlockRecoveryCommand.RecoveringBlock recoveringBlock = @@ -367,7 +368,7 @@ public void testEcRecoverBlocks() throws Throwable { // write 5MB File AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); final AtomicReference err = new AtomicReference<>(); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { stm.close(); } catch (Throwable t1) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index babce8d5833cf..6ddd009c4ba0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -51,6 +51,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -591,7 +592,7 @@ public void run() { public Object answer(InvocationOnMock invocationOnMock) throws Throwable { final Random r = new Random(); Thread addVolThread = - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -928,7 +929,7 @@ public void logDelaySendingAckToUpstream( final DataNode dataNode = dn; final CyclicBarrier reconfigBarrier = new CyclicBarrier(2); - Thread reconfigThread = new Thread(() -> { + Thread reconfigThread = new HadoopThread(() -> { try { reconfigBarrier.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index f4d66f8c8d001..a20bf94b2ac07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -82,6 +82,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -441,7 +442,7 @@ public void delayWhenOfferServiceHoldLock() { BPServiceActor actor = service.getBPServiceActors().get(0); DatanodeRegistration bpRegistration = actor.getBpRegistration(); - Thread register = new Thread(() -> { + Thread register = new HadoopThread(() -> { try { service.registrationSucceeded(actor, bpRegistration); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java index 81f6020088965..15c7f71f0922f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -85,7 +86,7 @@ public void testBaseFunc() { @Test @Timeout(value = 5) public void testAcquireWriteLockError() throws InterruptedException { - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { manager.readLock(LockLevel.BLOCK_POOl, "test"); manager.writeLock(LockLevel.BLOCK_POOl, "test"); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java index c6b5592c3c01d..1e5b979789c8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java @@ -34,6 +34,7 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -95,7 +96,7 @@ public NullDataNode(Configuration conf, OutputStream out, int port) throws any(StorageType.class), any(String.class), any(ExtendedBlock.class), anyBoolean()); - new Thread(new NullServer(port)).start(); + new HadoopThread(new NullServer(port)).start(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index f1f57a9714f02..c359edb649f60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -196,7 +197,7 @@ public void run() { Thread threads[] = new Thread[NUM_TASKS]; for (int i = 0; i < NUM_TASKS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java index ef84c1732d5a9..bbb3c0552354a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java @@ -552,7 +552,7 @@ private static class Writer extends Daemon { } @Override - public void run() { + public void work() { /** * Create a file, write up to 3 blocks of data and close the file. * Do this in a loop until we are told to stop. 
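Reviewer note: the TestSpaceReservation hunk just above is worth a pause because `Writer` extends `Daemon`, not `Thread`, yet it still renames `run()` to `work()` — which implies `Daemon` itself has been rebased onto the same `work()` hook. That rebase is not visible in this section, so treat it as an inference. A self-contained stand-in showing the shape of the subclass migration:

```java
/** Stand-ins only; not the real Hadoop classes. */
class HadoopThreadStandIn extends Thread {
  @Override
  public final void run() {
    work();
  }

  public void work() {
  }
}

class DaemonStandIn extends HadoopThreadStandIn {
  DaemonStandIn() {
    setDaemon(true); // Daemon's defining behavior
  }
}

/** Mirrors TestSpaceReservation.Writer after this patch. */
class Writer extends DaemonStandIn {
  @Override
  public void work() { // formerly run()
    // create a file, write up to 3 blocks, close; loop until told to stop
  }
}
```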
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 61c147e6e5ea7..7ccdd625306b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -39,6 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -315,7 +316,7 @@ public void testAuditLoggerWithCallContext() throws IOException { .build(); CallerContext.setCurrent(context); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - Thread child = new Thread(new Runnable() + Thread child = new HadoopThread(new Runnable() { @Override public void run() { @@ -342,7 +343,7 @@ public void run() { .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING)) .build(); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - child = new Thread(new Runnable() + child = new HadoopThread(new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index d9002f83948f8..f52f36556b475 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -59,6 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -456,14 +457,14 @@ public void testOpenRenameRace() throws Exception { // 6.release writeLock, it's fair lock so open thread gets read lock. // 7.open thread unlocks, rename gets write lock and does rename. // 8.rename thread unlocks, open thread gets write lock and update time. 
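Reviewer note: the eight-step comment in the TestDeleteRace hunk above leans on one concrete property — the namesystem lock is fair, so a reader that queued up before a writer is granted the lock first (step 6). That property is plain `java.util.concurrent`; a tiny standalone demonstration, unrelated to Hadoop itself (the sleeps are a crude way to fix the queueing order for the demo):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairLockDemo {
  public static void main(String[] args) throws InterruptedException {
    // 'true' selects the fair policy: waiting threads acquire in roughly
    // arrival order, which is what step 6 above relies on.
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

    lock.writeLock().lock(); // stands in for rename holding the write lock

    Thread open = new Thread(() -> {
      lock.readLock().lock(); // queues first, like the open thread
      System.out.println("open (reader) ran first");
      lock.readLock().unlock();
    });
    open.start();
    Thread.sleep(200); // let the reader queue ahead of the writer

    Thread rename = new Thread(() -> {
      lock.writeLock().lock(); // queues second
      System.out.println("rename (writer) ran second");
      lock.writeLock().unlock();
    });
    rename.start();
    Thread.sleep(200);

    lock.writeLock().unlock(); // fair handoff: the queued reader wins
    open.join();
    rename.join();
  }
}
```

With a non-fair lock the handoff order would be unspecified, and the interleaving the test depends on could not be forced.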
- Thread open = new Thread(() -> { + Thread open = new HadoopThread(() -> { try { openSem.release(); fsn.getBlockLocations("foo", src, 0, 5); } catch (IOException e) { } }); - Thread rename = new Thread(() -> { + Thread rename = new HadoopThread(() -> { try { openSem.acquire(); renameSem.release(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index c83676b245f7b..1d3625477fb59 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -71,6 +71,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; @@ -259,7 +260,7 @@ public void delay() { DataNodeFaultInjector.set(injector); // Truncate by using different client name. - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; boolean originCacheDisable = conf.getBoolean(hdfsCacheDisableKey, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 7c448f19a3bb8..460ad53f735b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.ChunkedArrayList; import org.junit.After; import org.junit.Before; @@ -223,7 +224,7 @@ public void testListOpenFilesInHA() throws Exception { final AtomicBoolean failoverCompleted = new AtomicBoolean(false); final AtomicBoolean listOpenFilesError = new AtomicBoolean(false); final int listingIntervalMsec = 250; - Thread clientThread = new Thread(new Runnable() { + Thread clientThread = new HadoopThread(new Runnable() { @Override public void run() { while(!failoverCompleted.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index ffcf72ad9ab07..0313a2e488c21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -62,6 +62,7 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.util.concurrent.HadoopThread; public class TestBootstrapStandby { private static final Logger LOG = @@ -401,7 +402,7 @@ public void testRateThrottling() throws Exception { final int timeOut = (int)(imageFile.length() / minXferRatePerMS) + 1; // A very low 
DFS_IMAGE_TRANSFER_RATE_KEY value won't affect bootstrapping final AtomicBoolean bootStrapped = new AtomicBoolean(false); - new Thread( + new HadoopThread( new Runnable() { @Override public void run() { @@ -431,7 +432,7 @@ public Boolean get() { // A very low DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY value should // cause timeout bootStrapped.set(false); - new Thread( + new HadoopThread( new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index ff6c2288b538b..57be6dddce6ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -55,6 +55,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -150,7 +151,7 @@ public void testMsyncSimple() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // this read will block until roll and tail edits happen. dfs.getFileStatus(testPath); @@ -200,7 +201,7 @@ private void testMsync(boolean autoMsync, long autoMsyncPeriodMs) dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // After msync, client should have the latest state ID from active. // Therefore, the subsequent getFileStatus call should succeed. @@ -289,7 +290,7 @@ public void testCallFromNewClient() throws Exception { (DistributedFileSystem) FileSystem.get(conf2); dfs2.getClient().getHAServiceState(); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { dfs2.getFileStatus(testPath); readStatus.set(1); @@ -330,7 +331,7 @@ public void testUncoordinatedCall() throws Exception { AtomicInteger readStatus = new AtomicInteger(0); // create a separate thread to make a blocking read. - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // this read call will block until server state catches up. But due to // configuration, this will take a very long time. 
@@ -435,7 +436,7 @@ public void testRpcQueueTimeNumOpsMetrics() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(new Runnable() { + Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index 04a34160facdc..d361ad5215653 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -700,7 +701,7 @@ public void testOpenFileWritingAcrossSnapDeletion() throws Exception { final AtomicBoolean writerError = new AtomicBoolean(false); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch deleteLatch = new CountDownLatch(1); - Thread t = new Thread(new Runnable() { + Thread t = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java index d85755812a82e..0f1c1ad6221ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java @@ -24,7 +24,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; -import java.security.PrivilegedAction; +import java.util.concurrent.Callable; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; @@ -922,9 +922,9 @@ public void testDeleteSnapshotWithPermissionsDisabled() throws Exception { hdfs.createSnapshot(path, "s1"); UserGroupInformation anotherUser = UserGroupInformation .createRemoteUser("anotheruser"); - anotherUser.doAs(new PrivilegedAction<Object>() { + anotherUser.callAsNoException(new Callable<Object>() { @Override - public Object run() { + public Object call() { DistributedFileSystem anotherUserFS = null; try { anotherUserFS = cluster.getFileSystem(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index 4c245136efb41..47d651c9566f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -64,6 +64,7 @@ import
org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -151,7 +152,7 @@ public void serviceStart() throws Exception { HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder(). setDaemon(true).setNameFormat("uber-SubtaskRunner").build()); // create and start an event handling thread - eventHandler = new Thread(new EventHandler(), "uber-EventHandler"); + eventHandler = new HadoopThread(new EventHandler(), "uber-EventHandler"); // if the job classloader is specified, set it onto the event handler as the // thread context classloader so that it can be used by the event handler // as well as the subtask runner threads diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index bbf527ebff53a..c891a55ba3a42 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -23,7 +23,7 @@ import java.io.IOException; import java.io.OutputStream; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; import java.util.concurrent.ScheduledExecutorService; import org.apache.commons.lang3.exception.ExceptionUtils; @@ -113,9 +113,9 @@ public static void main(String[] args) throws Throwable { SecurityUtil.setTokenService(jt, address); taskOwner.addToken(jt); final TaskUmbilicalProtocol umbilical = - taskOwner.doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() { + taskOwner.callAs(new Callable<TaskUmbilicalProtocol>() { @Override - public TaskUmbilicalProtocol run() throws Exception { + public TaskUmbilicalProtocol call() throws Exception { return (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class, TaskUmbilicalProtocol.versionID, address, job); } @@ -169,9 +169,9 @@ public TaskUmbilicalProtocol run() throws Exception { // Create a final reference to the task for the doAs block final Task taskFinal = task; - childUGI.doAs(new PrivilegedExceptionAction<Object>() { + childUGI.callAs(new Callable<Object>() { @Override - public Object run() throws Exception { + public Object call() throws Exception { // use job-specified working directory setEncryptedSpillKeyIfRequired(taskFinal); FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory()); @@ -194,9 +194,9 @@ public Object run() throws Exception { task.taskCleanup(umbilical); } else { final Task taskFinal = task; - childUGI.doAs(new PrivilegedExceptionAction<Object>() { + childUGI.callAs(new Callable<Object>() { @Override - public Object run() throws Exception { + public Object call() throws Exception { taskFinal.taskCleanup(umbilical); return null; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 6ab06696c19df..72b395f11906b 100644 ---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -65,6 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; @@ -351,7 +352,7 @@ protected void serviceStart() throws Exception { } else if (timelineV2Client != null) { timelineV2Client.start(); } - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new HadoopThread(new Runnable() { @Override public void run() { JobHistoryEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index eb3583b41bc71..39ebff0971074 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.atomic.AtomicBoolean; @@ -134,6 +135,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -739,10 +741,10 @@ private class JobFinishEventHandler implements EventHandler<JobFinishEvent> { public void handle(JobFinishEvent event) { // Create a new thread to shutdown the AM. We should not do it in-line // to avoid blocking the dispatcher itself.
- new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { shutDownJob(); } }.start(); @@ -1761,9 +1763,9 @@ protected static void initAndStartAppMaster(final MRAppMaster appMaster, } } conf.getCredentials().addAll(credentials); - appMasterUgi.doAs(new PrivilegedExceptionAction<Object>() { + appMasterUgi.callAs(new Callable<Object>() { @Override - public Object run() throws Exception { + public Object call() throws Exception { appMaster.init(conf); appMaster.start(); if(appMaster.errorHappenedShutDown) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index 13389d67efb71..c9c5e57e08cb1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.Clock; import org.slf4j.Logger; @@ -125,7 +126,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - lostTaskCheckerThread = new Thread(new PingChecker()); + lostTaskCheckerThread = new HadoopThread(new PingChecker()); lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker"); lostTaskCheckerThread.start(); super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index c72e13e648e15..82f28bf019a55 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -126,7 +127,7 @@ protected void serviceStart() throws Exception { ThreadFactory backingTf = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread thread = new Thread(r); + Thread thread = new HadoopThread(r); thread.setContextClassLoader(jobClassLoader); return thread; } @@ -136,7 +137,7 @@ public Thread newThread(Runnable r) { ThreadFactory tf = tfBuilder.build(); launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
new LinkedBlockingQueue<Runnable>(), tf); - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new HadoopThread(new Runnable() { @Override public void run() { CommitterEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index d184d9be64bf8..0724fed2561be 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -43,6 +43,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -285,9 +286,9 @@ protected void serviceStart() throws Exception { Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf); - eventHandlingThread = new Thread() { + eventHandlingThread = new HadoopThread() { @Override - public void run() { + public void work() { ContainerLauncherEvent event = null; Set<String> allNodes = new HashSet<String>(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index b836120a8dcb4..0ef80445d3003 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -40,6 +40,8 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.SubjectUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; @@ -300,7 +302,7 @@ public void run() { } protected void startAllocatorThread() { - allocatorThread = new Thread(new AllocatorRunnable()); + allocatorThread = new HadoopThread(new AllocatorRunnable()); allocatorThread.setName("RMCommunicator Allocator"); allocatorThread.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index cf2f90ff1e563..ae1ad1a7eca8b 100644 ---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -62,6 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -111,7 +112,7 @@ public class RMContainerAllocator extends RMContainerRequestor public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted " + "to make room for pending map attempts"; - private Thread eventHandlingThread; + private HadoopThread eventHandlingThread; private final AtomicBoolean stopped; static { @@ -246,10 +247,10 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - this.eventHandlingThread = new Thread() { + this.eventHandlingThread = new HadoopThread() { @SuppressWarnings("unchecked") @Override - public void run() { + public void work() { ContainerAllocatorEvent event; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 800ff1809704e..2a63dd876e595 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -219,7 +220,7 @@ public void run() { } } }; - speculationBackgroundThread = new Thread + speculationBackgroundThread = new HadoopThread (speculationBackgroundCore, "DefaultSpeculator background processing"); speculationBackgroundThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 34f4c8c7164cf..717783964d805 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -137,7 +138,7 @@ public void handle(ContainerAllocatorEvent event) { } @Override protected void serviceStart() throws Exception { - thread = new Thread(new Runnable() { + thread = new HadoopThread(new Runnable() { @Override @SuppressWarnings("unchecked") public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index aae1fd0b673f6..fe4ca80b8c722 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -72,6 +72,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +115,7 @@ public ProtocolSignature getProtocolSignature(String protocol, this, protocol, clientVersion, clientMethodsHash); } - private class Job extends Thread implements TaskUmbilicalProtocol { + private class Job extends HadoopThread implements TaskUmbilicalProtocol { // The job directory on the system: JobClient places job configurations here. // This is analogous to JobTracker's system directory. 
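
[Editor's note] Every hunk above applies the same two-step recipe: the supertype changes from Thread to org.apache.hadoop.util.concurrent.HadoopThread, and overrides of run() are renamed to work(). The sketch below is one plausible reading of what HadoopThread provides; the constructor set mirrors the call sites in this patch, but the class body is an assumption, not the hadoop-common source.

package org.apache.hadoop.util.concurrent;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical sketch only; the real class ships in hadoop-common and may
// differ. Subclasses override work() instead of run(); keeping run() final
// gives Hadoop a single choke point around every thread body, and would
// also explain why the redundant run() override is deleted from
// TestPipeApplication's SocketCleaner further below in this patch.
public class HadoopThread extends Thread {

  private static final Logger LOG =
      LoggerFactory.getLogger(HadoopThread.class);

  public HadoopThread() {
  }

  public HadoopThread(String name) {
    super(name);
  }

  public HadoopThread(Runnable target) {
    super(target);
  }

  public HadoopThread(Runnable target, String name) {
    super(target, name);
  }

  // Default body: execute the Runnable handed to the constructor, if any.
  public void work() throws Exception {
    super.run();
  }

  @Override
  public final void run() {
    try {
      work();
    } catch (Throwable t) {
      // Assumed centralized handling. Rethrowing keeps the JDK
      // UncaughtExceptionHandler semantics that tests such as
      // TestTaskProgressReporter (below) depend on.
      LOG.error("Thread {} failed", getName(), t);
      if (t instanceof Error) {
        throw (Error) t;
      }
      throw t instanceof RuntimeException
          ? (RuntimeException) t : new RuntimeException(t);
    }
  }
}

Under that reading, ports like SpillThread, EventFetcher, or the shuffle Referee stay behaviorally identical: construction, setName()/setDaemon() calls, and start() are untouched, and only the override's name changes.
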
private Path systemJobDir; @@ -521,7 +522,7 @@ private void runTasks(List runnables, } @Override - public void run() { + public void work() { JobID jobId = profile.getJobID(); JobContext jContext = new JobContextImpl(job, jobId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java index d24888ee7ac31..afe4b4a9db6a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -110,9 +110,9 @@ protected MRClientProtocol instantiateHistoryProxy(final Configuration conf, } final YarnRPC rpc = YarnRPC.create(conf); UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); - return currentUser.doAs(new PrivilegedAction() { + return currentUser.callAsNoException(new Callable() { @Override - public MRClientProtocol run() { + public MRClientProtocol call() { return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, hsAddress, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java index a40a40ada02d9..abbce2eda75bd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.HadoopThread; class CleanupQueue { @@ -100,7 +101,7 @@ protected boolean isQueueEmpty() { return (cleanupThread.queue.size() == 0); } - private static class PathCleanupThread extends Thread { + private static class PathCleanupThread extends HadoopThread { // cleanup queue which deletes files/directories of the paths queued up. 
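
[Editor's note] The UserGroupInformation side of the migration pairs two helpers with the two legacy doAs overloads: callAs replaces doAs(PrivilegedExceptionAction) and keeps the throws IOException, InterruptedException contract the call sites below still rely on, while callAsNoException replaces doAs(PrivilegedAction) for bodies that throw no checked exceptions. The standalone model below is an assumption about how they could be layered over SubjectUtil; only the method names and the exception contract are taken from the hunks themselves.

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;
import javax.security.auth.Subject;

import org.apache.hadoop.util.SubjectUtil;

// Hypothetical stand-in for the relevant slice of UserGroupInformation;
// not the real class.
final class UgiCallAsModel {

  private final Subject subject;

  UgiCallAsModel(Subject subject) {
    this.subject = subject;
  }

  // Mirrors doAs(PrivilegedExceptionAction): checked exceptions thrown by
  // the Callable surface to the caller unwrapped.
  <T> T callAs(Callable<T> callable)
      throws IOException, InterruptedException {
    try {
      return SubjectUtil.callAs(subject, callable);
    } catch (CompletionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      if (cause instanceof InterruptedException) {
        throw (InterruptedException) cause;
      }
      if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause;
      }
      if (cause instanceof Error) {
        throw (Error) cause;
      }
      throw new UndeclaredThrowableException(cause);
    }
  }

  // Mirrors doAs(PrivilegedAction): the Callable is expected not to throw
  // checked exceptions, so none reach the caller.
  <T> T callAsNoException(Callable<T> callable) {
    try {
      return callAs(callable);
    } catch (IOException | InterruptedException e) {
      throw new UndeclaredThrowableException(e); // assumption
    }
  }
}

Since Callable is a functional interface, call sites the patch otherwise leaves alone could later shrink to lambdas, e.g. loginUgi.callAs(() -> history.getJob(jobID)); the anonymous-class form used throughout keeps the diff mechanical and easy to review.
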
private LinkedBlockingQueue queue = @@ -120,7 +121,7 @@ void addToQueue(PathDeletionContext[] contexts) { } } - public void run() { + public void work() { if (LOG.isDebugEnabled()) { LOG.debug(getName() + " started."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java index f94752fc238e3..254d7705869a6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java @@ -21,10 +21,10 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.URL; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -567,9 +567,9 @@ public RunningJob submitJobInternal(final JobConf conf) try { conf.setBooleanIfUnset("mapred.mapper.new-api", false); conf.setBooleanIfUnset("mapred.reducer.new-api", false); - Job job = clientUgi.doAs(new PrivilegedExceptionAction () { + Job job = clientUgi.callAs(new Callable () { @Override - public Job run() throws IOException, ClassNotFoundException, + public Job call() throws IOException, ClassNotFoundException, InterruptedException { Job job = Job.getInstance(conf); job.submit(); @@ -596,8 +596,8 @@ public Job run() throws IOException, ClassNotFoundException, private Job getJobUsingCluster(final JobID jobid) throws IOException, InterruptedException { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public Job run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public Job call() throws IOException, InterruptedException { return cluster.getJob(jobid); } }); @@ -753,8 +753,8 @@ public void displayTasks(final JobID jobId, String type, String state) */ public ClusterStatus getClusterStatus() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public ClusterStatus run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public ClusterStatus call() throws IOException, InterruptedException { ClusterMetrics metrics = cluster.getClusterStatus(); return new ClusterStatus(metrics.getTaskTrackerCount(), metrics .getBlackListedTaskTrackerCount(), cluster @@ -801,8 +801,8 @@ private Collection arrayToBlackListInfo(TaskTrackerInfo[] objs) */ public ClusterStatus getClusterStatus(boolean detailed) throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public ClusterStatus run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public ClusterStatus call() throws IOException, InterruptedException { ClusterMetrics metrics = cluster.getClusterStatus(); return new ClusterStatus(arrayToStringList(cluster.getActiveTaskTrackers()), arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()), @@ -843,9 +843,9 @@ public JobStatus[] jobsToComplete() throws IOException { public JobStatus[] getAllJobs() throws IOException { try { org.apache.hadoop.mapreduce.JobStatus[] jobs = - clientUgi.doAs(new 
PrivilegedExceptionAction< + clientUgi.callAs(new Callable< org.apache.hadoop.mapreduce.JobStatus[]> () { - public org.apache.hadoop.mapreduce.JobStatus[] run() + public org.apache.hadoop.mapreduce.JobStatus[] call() throws IOException, InterruptedException { return cluster.getAllJobStatuses(); } @@ -971,9 +971,9 @@ protected long getCounter(org.apache.hadoop.mapreduce.Counters cntrs, */ public int getDefaultMaps() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { + return clientUgi.callAs(new Callable() { @Override - public Integer run() throws IOException, InterruptedException { + public Integer call() throws IOException, InterruptedException { return cluster.getClusterStatus().getMapSlotCapacity(); } }); @@ -990,9 +990,9 @@ public Integer run() throws IOException, InterruptedException { */ public int getDefaultReduces() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { + return clientUgi.callAs(new Callable() { @Override - public Integer run() throws IOException, InterruptedException { + public Integer call() throws IOException, InterruptedException { return cluster.getClusterStatus().getReduceSlotCapacity(); } }); @@ -1008,9 +1008,9 @@ public Integer run() throws IOException, InterruptedException { */ public Path getSystemDir() { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { + return clientUgi.callAs(new Callable() { @Override - public Path run() throws IOException, InterruptedException { + public Path call() throws IOException, InterruptedException { return cluster.getSystemDir(); } }); @@ -1053,9 +1053,9 @@ public static boolean isJobDirValid(Path jobDirPath, FileSystem fs) */ public Path getStagingAreaDir() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { + return clientUgi.callAs(new Callable() { @Override - public Path run() throws IOException, InterruptedException { + public Path call() throws IOException, InterruptedException { return cluster.getStagingAreaDir(); } }); @@ -1097,8 +1097,8 @@ private JobQueueInfo[] getJobQueueInfoArray(QueueInfo[] queues) */ public JobQueueInfo[] getRootQueues() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public JobQueueInfo[] run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public JobQueueInfo[] call() throws IOException, InterruptedException { return getJobQueueInfoArray(cluster.getRootQueues()); } }); @@ -1117,8 +1117,8 @@ public JobQueueInfo[] run() throws IOException, InterruptedException { */ public JobQueueInfo[] getChildQueues(final String queueName) throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public JobQueueInfo[] run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public JobQueueInfo[] call() throws IOException, InterruptedException { return getJobQueueInfoArray(cluster.getChildQueues(queueName)); } }); @@ -1136,8 +1136,8 @@ public JobQueueInfo[] run() throws IOException, InterruptedException { */ public JobQueueInfo[] getQueues() throws IOException { try { - return clientUgi.doAs(new PrivilegedExceptionAction() { - public JobQueueInfo[] run() throws IOException, InterruptedException { + return clientUgi.callAs(new Callable() { + public JobQueueInfo[] call() throws IOException, InterruptedException { return getJobQueueInfoArray(cluster.getQueues()); } }); @@ -1156,9 +1156,9 @@ public JobQueueInfo[] run() throws IOException, InterruptedException { 
public JobStatus[] getJobsFromQueue(final String queueName) throws IOException { try { - QueueInfo queue = clientUgi.doAs(new PrivilegedExceptionAction() { + QueueInfo queue = clientUgi.callAs(new Callable() { @Override - public QueueInfo run() throws IOException, InterruptedException { + public QueueInfo call() throws IOException, InterruptedException { return cluster.getQueue(queueName); } }); @@ -1186,9 +1186,9 @@ public QueueInfo run() throws IOException, InterruptedException { */ public JobQueueInfo getQueueInfo(final String queueName) throws IOException { try { - QueueInfo queueInfo = clientUgi.doAs(new - PrivilegedExceptionAction() { - public QueueInfo run() throws IOException, InterruptedException { + QueueInfo queueInfo = clientUgi.callAs(new + Callable() { + public QueueInfo call() throws IOException, InterruptedException { return cluster.getQueue(queueName); } }); @@ -1209,10 +1209,10 @@ public QueueInfo run() throws IOException, InterruptedException { public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException { try { org.apache.hadoop.mapreduce.QueueAclsInfo[] acls = - clientUgi.doAs(new - PrivilegedExceptionAction + clientUgi.callAs(new + Callable () { - public org.apache.hadoop.mapreduce.QueueAclsInfo[] run() + public org.apache.hadoop.mapreduce.QueueAclsInfo[] call() throws IOException, InterruptedException { return cluster.getQueueAclsForCurrentUser(); } @@ -1235,9 +1235,9 @@ public org.apache.hadoop.mapreduce.QueueAclsInfo[] run() */ public Token getDelegationToken(final Text renewer) throws IOException, InterruptedException { - return clientUgi.doAs(new - PrivilegedExceptionAction>() { - public Token run() throws IOException, + return clientUgi.callAs(new + Callable>() { + public Token call() throws IOException, InterruptedException { return cluster.getDelegationToken(renewer); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 4f86f912838fa..5ebe61df6e967 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -74,6 +74,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1549,10 +1550,10 @@ public void flush() throws IOException, ClassNotFoundException, public void close() { } - protected class SpillThread extends Thread { + protected class SpillThread extends HadoopThread { @Override - public void run() { + public void work() { spillLock.lock(); spillThreadRunning = true; try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 6861f1b2cd36d..7820f49184d88 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -72,6 +72,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -952,7 +953,7 @@ void resetDoneFlag() { } public void startCommunicationThread() { if (pingThread == null) { - pingThread = new Thread(this, "communication thread"); + pingThread = new HadoopThread(this, "communication thread"); pingThread.setDaemon(true); pingThread.start(); } @@ -963,7 +964,7 @@ public void startDiskLimitCheckerThreadIfNeeded() { MRJobConfig.JOB_SINGLE_DISK_LIMIT_BYTES, MRJobConfig.DEFAULT_JOB_SINGLE_DISK_LIMIT_BYTES) >= 0) { try { - diskLimitCheckThread = new Thread(new DiskLimitCheck(conf), + diskLimitCheckThread = new HadoopThread(new DiskLimitCheck(conf), "disk limit check thread"); diskLimitCheckThread.setDaemon(true); diskLimitCheckThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java index 767dcd45143ec..a9f562df34232 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java @@ -57,6 +57,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -279,7 +280,7 @@ public static String createDigest(byte[] password, String data) } @VisibleForTesting - public static class PingSocketCleaner extends Thread { + public static class PingSocketCleaner extends HadoopThread { private final ServerSocket serverSocket; private final int soTimeout; @@ -290,7 +291,7 @@ public static class PingSocketCleaner extends Thread { } @Override - public void run() { + public void work() { LOG.info("PingSocketCleaner started..."); while (!Thread.currentThread().isInterrupted()) { Socket clientSocket = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java index 89c594a89b034..6b9530ba41261 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,7 +96,7 @@ private enum MessageType { START(0), private static class UplinkReaderThread - extends Thread { 
+ extends HadoopThread { private DataInputStream inStream; private UpwardProtocol handler; @@ -117,7 +118,7 @@ public void closeConnection() throws IOException { inStream.close(); } - public void run() { + public void work() { while (true) { try { if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index 050d0e5a6e0c8..a52f1030e976b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -21,11 +21,11 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.ServiceConfigurationError; import java.util.ServiceLoader; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; @@ -190,8 +190,8 @@ public synchronized FileSystem getFileSystem() throws IOException, InterruptedException { if (this.fs == null) { try { - this.fs = ugi.doAs(new PrivilegedExceptionAction() { - public FileSystem run() throws IOException, InterruptedException { + this.fs = ugi.callAs(new Callable() { + public FileSystem call() throws IOException, InterruptedException { final Path sysDir = new Path(client.getSystemDir()); return sysDir.getFileSystem(getConf()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java index dada6595b6b83..f9fa749448ac8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java @@ -20,10 +20,10 @@ import java.io.IOException; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.LinkedHashMap; import java.util.Map; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -326,9 +326,9 @@ synchronized void ensureFreshStatus() */ synchronized void updateStatus() throws IOException { try { - this.status = ugi.doAs(new PrivilegedExceptionAction() { + this.status = ugi.callAs(new Callable() { @Override - public JobStatus run() throws IOException, InterruptedException { + public JobStatus call() throws IOException, InterruptedException { return cluster.getClient().getJobStatus(getJobID()); } }); @@ -504,10 +504,10 @@ String getTaskFailureEventString() throws IOException, InterruptedException { int failCount = 1; TaskCompletionEvent lastEvent = null; - TaskCompletionEvent[] events = ugi.doAs(new - PrivilegedExceptionAction() { + TaskCompletionEvent[] events = ugi.callAs(new + Callable() { @Override - public TaskCompletionEvent[] run() throws 
IOException, + public TaskCompletionEvent[] call() throws IOException, InterruptedException { return cluster.getClient().getTaskCompletionEvents( status.getJobID(), 0, 10); @@ -542,8 +542,8 @@ public TaskReport[] getTaskReports(TaskType type) throws IOException, InterruptedException { ensureState(JobState.RUNNING); final TaskType tmpType = type; - return ugi.doAs(new PrivilegedExceptionAction() { - public TaskReport[] run() throws IOException, InterruptedException { + return ugi.callAs(new Callable() { + public TaskReport[] call() throws IOException, InterruptedException { return cluster.getClient().getTaskReports(getJobID(), tmpType); } }); @@ -659,9 +659,9 @@ public void setPriority(JobPriority jobPriority) throws IOException, } else { ensureState(JobState.RUNNING); final int tmpPriority = convertPriorityToInteger(jobPriority); - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Object run() throws IOException, InterruptedException { + public Object call() throws IOException, InterruptedException { cluster.getClient() .setJobPriority(getJobID(), Integer.toString(tmpPriority)); return null; @@ -684,9 +684,9 @@ public void setPriorityAsInteger(int jobPriority) throws IOException, } else { ensureState(JobState.RUNNING); final int tmpPriority = jobPriority; - ugi.doAs(new PrivilegedExceptionAction() { + ugi.callAs(new Callable() { @Override - public Object run() throws IOException, InterruptedException { + public Object call() throws IOException, InterruptedException { cluster.getClient() .setJobPriority(getJobID(), Integer.toString(tmpPriority)); return null; @@ -727,9 +727,9 @@ private int convertPriorityToInteger(JobPriority jobPriority) { public TaskCompletionEvent[] getTaskCompletionEvents(final int startFrom, final int numEvents) throws IOException, InterruptedException { ensureState(JobState.RUNNING); - return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public TaskCompletionEvent[] run() throws IOException, InterruptedException { + public TaskCompletionEvent[] call() throws IOException, InterruptedException { return cluster.getClient().getTaskCompletionEvents(getJobID(), startFrom, numEvents); } @@ -771,8 +771,8 @@ public boolean killTask(final TaskAttemptID taskId, final boolean shouldFail) throws IOException { ensureState(JobState.RUNNING); try { - return ugi.doAs(new PrivilegedExceptionAction() { - public Boolean run() throws IOException, InterruptedException { + return ugi.callAs(new Callable() { + public Boolean call() throws IOException, InterruptedException { return cluster.getClient().killTask(taskId, shouldFail); } }); @@ -815,9 +815,9 @@ public Counters getCounters() throws IOException { ensureState(JobState.RUNNING); try { - return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public Counters run() throws IOException, InterruptedException { + public Counters call() throws IOException, InterruptedException { return cluster.getClient().getJobCounters(getJobID()); } }); @@ -836,9 +836,9 @@ public Counters run() throws IOException, InterruptedException { public String[] getTaskDiagnostics(final TaskAttemptID taskid) throws IOException, InterruptedException { ensureState(JobState.RUNNING); - return ugi.doAs(new PrivilegedExceptionAction() { + return ugi.callAs(new Callable() { @Override - public String[] run() throws IOException, InterruptedException { + public String[] call() throws IOException, InterruptedException { return 
cluster.getClient().getTaskDiagnostics(taskid); } }); @@ -1639,8 +1639,8 @@ synchronized void connect() throws IOException, InterruptedException, ClassNotFoundException { if (cluster == null) { cluster = - ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() + ugi.callAs(new Callable() { + public Cluster call() throws IOException, InterruptedException, ClassNotFoundException { return new Cluster(getConfiguration()); @@ -1671,8 +1671,8 @@ public void submit() connect(); final JobSubmitter submitter = getJobSubmitter(cluster.getFileSystem(), cluster.getClient()); - status = ugi.doAs(new PrivilegedExceptionAction() { - public JobStatus run() throws IOException, InterruptedException, + status = ugi.callAs(new Callable() { + public JobStatus call() throws IOException, InterruptedException, ClassNotFoundException { return submitter.submitJobInternal(Job.this, cluster); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java index 803ece7480c0d..3e51bac25b11a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * The Chain class provides all the common functionality for the @@ -296,7 +297,7 @@ private synchronized boolean setIfUnsetThrowable(Throwable th) { return false; } - private class MapRunner extends Thread { + private class MapRunner extends HadoopThread { private Mapper mapper; private Mapper.Context chainContext; private RecordReader rr; @@ -313,7 +314,7 @@ public MapRunner(Mapper mapper, } @Override - public void run() { + public void work() { if (getThrowable() != null) { return; } @@ -329,7 +330,7 @@ public void run() { } } - private class ReduceRunner extends Thread { + private class ReduceRunner extends HadoopThread { private Reducer reducer; private Reducer.Context chainContext; private RecordWriter rw; @@ -344,7 +345,7 @@ private class ReduceRunner extends Thread { } @Override - public void run() { + public void work() { try { reducer.run(chainContext); rw.close(chainContext); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java index 382ed959f12a0..c5b66dbd333e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.lib.map; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -247,7 +248,7 @@ public float getProgress() { } } - private class MapRunner extends Thread { + private class MapRunner extends HadoopThread { private Mapper mapper; private Context subcontext; private Throwable throwable; @@ -269,7 +270,7 @@ private class MapRunner extends Thread { } @Override - public void run() { + public void work() { try { mapper.run(subcontext); reader.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java index 0e28c3b41c02e..6fac8ff401714 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java @@ -23,10 +23,11 @@ import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class EventFetcher extends Thread { +class EventFetcher extends HadoopThread { private static final long SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; private static final int RETRY_PERIOD = 5000; @@ -56,7 +57,7 @@ public EventFetcher(TaskAttemptID reduce, } @Override - public void run() { + public void work() { int failures = 0; LOG.info(reduce + " Thread started: " + getName()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 59ef95bdd462b..03a4569d40672 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,7 +56,7 @@ import org.apache.hadoop.classification.VisibleForTesting; @VisibleForTesting -public class Fetcher extends Thread { +public class Fetcher extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(Fetcher.class); @@ -187,7 +188,7 @@ public Fetcher(JobConf job, TaskAttemptID reduceId, } } - public void run() { + public void work() { try { while (!stopped && !Thread.currentThread().isInterrupted()) { MapHost host = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index dc563eeab4d0f..9ad5db1a0ba4d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -71,7 +71,7 @@ public LocalFetcher(JobConf job, TaskAttemptID reduceId, setDaemon(true); } - public void run() { + public void work() { // Create a worklist of task attempts to work over. Set maps = new HashSet(); for (TaskAttemptID map : localMapFiles.keySet()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java index c617569da33e8..1022b574f27df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java @@ -26,10 +26,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -abstract class MergeThread extends Thread { +abstract class MergeThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(MergeThread.class); @@ -78,7 +79,7 @@ public synchronized void waitForMerge() throws InterruptedException { } } - public void run() { + public void work() { while (true) { List inputs = null; try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java index 173cd093e9f6e..be379cba18116 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java @@ -49,6 +49,7 @@ import org.apache.hadoop.mapreduce.task.reduce.MapHost.State; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -554,13 +555,13 @@ public int compareTo(Delayed o) { /** * A thread that takes hosts off of the penalty list when the timer expires. 
*/ - private class Referee extends Thread { + private class Referee extends HadoopThread { public Referee() { setName("ShufflePenaltyReferee"); setDaemon(true); } - public void run() { + public void work() { try { while (true) { // take the first host that has an expired penalty diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java index 585a21d568231..7703fca7e61b5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -326,7 +327,7 @@ public static boolean isProcessGroupAlive(String pgrpId) { /** * Helper thread class that kills process-tree with SIGKILL in background */ - static class SigKillThread extends Thread { + static class SigKillThread extends HadoopThread { private String pid = null; private boolean isProcessGroup = false; @@ -339,7 +340,7 @@ private SigKillThread(String pid, boolean isProcessGroup, long interval) { sleepTimeBeforeSigKill = interval; } - public void run() { + public void work() { sigKillInCurrentThread(pid, isProcessGroup, sleepTimeBeforeSigKill); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java index e624b0304f166..d2f1a5fc69a5a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.mapreduce.MRJobConfig; import org.junit.jupiter.api.BeforeEach; @@ -221,9 +222,9 @@ public void testRemoveMap() throws Exception { // run multiple times for (int i = 0; i < 20; ++i) { - Thread getInfoThread = new Thread() { + Thread getInfoThread = new HadoopThread() { @Override - public void run() { + public void work() { try { cache.getIndexInformation("bigIndex", partsPerMap, big, user); } catch (Exception e) { @@ -231,9 +232,9 @@ public void run() { } } }; - Thread removeMapThread = new Thread() { + Thread removeMapThread = new HadoopThread() { @Override - public void run() { + public void work() { cache.removeMap("bigIndex"); } }; @@ -266,9 +267,9 @@ public void testCreateRace() throws Exception { // run multiple instances Thread[] getInfoThreads = new Thread[50]; for (int i = 0; i < 50; i++) { - getInfoThreads[i] = new Thread() { + getInfoThreads[i] = new HadoopThread() { @Override - 
public void run() { + public void work() { try { cache.getIndexInformation("racyIndex", partsPerMap, racy, user); cache.removeMap("racyIndex"); @@ -285,9 +286,9 @@ public void run() { final Thread mainTestThread = Thread.currentThread(); - Thread timeoutThread = new Thread() { + Thread timeoutThread = new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(15000); mainTestThread.interrupt(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java index ef43beaa5f797..46aef150978e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -252,7 +253,7 @@ public void uncaughtException(Thread th, Throwable ex) { task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); reporter.startDiskLimitCheckerThreadIfNeeded(); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); t.start(); @@ -273,7 +274,7 @@ public void testTaskProgress() throws Exception { Task task = new DummyTask(); task.setConf(job); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.start(); Thread.sleep(2100); task.setTaskDone(); @@ -328,7 +329,7 @@ public void uncaughtException(Thread th, Throwable ex) { Task task = new DummyTask(); task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index 717281ffe78b0..827e20c16c2a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -21,10 +21,10 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.security.AccessControlException; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -231,10 +231,10 @@ private Job verifyAndGetJob(final JobId 
jobID, boolean exceptionThrow) Job job = null; try { loginUgi = UserGroupInformation.getLoginUser(); - job = loginUgi.doAs(new PrivilegedExceptionAction() { + job = loginUgi.callAs(new Callable() { @Override - public Job run() throws Exception { + public Job call() throws Exception { Job job = history.getJob(jobID); return job; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java index 14efa4cd49d74..d703733bacedf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -251,9 +251,9 @@ public void refreshLogRetentionSettings() throws IOException { UserGroupInformation user = checkAcls("refreshLogRetentionSettings"); try { - loginUGI.doAs(new PrivilegedExceptionAction() { + loginUGI.callAs(new Callable() { @Override - public Void run() throws IOException { + public Void call() throws IOException { aggLogDelService.refreshLogRetentionSettings(); return null; } @@ -271,9 +271,9 @@ public void refreshJobRetentionSettings() throws IOException { UserGroupInformation user = checkAcls("refreshJobRetentionSettings"); try { - loginUGI.doAs(new PrivilegedExceptionAction() { + loginUGI.callAs(new Callable() { @Override - public Void run() throws IOException { + public Void call() throws IOException { jobHistoryService.refreshJobRetentionSettings(); return null; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java index 2a7a9f3c80bef..127e33b8cd68e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.junit.jupiter.api.AfterAll; @@ -106,7 +107,7 @@ public void testTwoThreadsQueryingDifferentJobOfSameUser() * files in one child thread. */ createJhistFile(job1); - webRequest1 = new Thread( + webRequest1 = new HadoopThread( new Runnable() { @Override public void run() { @@ -136,7 +137,7 @@ public void run() { * will also see the job history files for job1. 
*/ createJhistFile(job2); - webRequest2 = new Thread( + webRequest2 = new HadoopThread( new Runnable() { @Override public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java index ba57928577055..bc743b9905687 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java @@ -25,14 +25,13 @@ import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; -import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.Callable; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -329,9 +328,9 @@ public void testUGIForLogAndJobRefresh(boolean pEnableSecurity) throws Exception hsAdminServer.setLoginUGI(loginUGI); // Run refresh log retention settings with test user - ugi.doAs(new PrivilegedAction() { + ugi.callAsNoException(new Callable() { @Override - public Void run() { + public Void call() { String[] args = new String[1]; args[0] = "-refreshLogRetentionSettings"; try { @@ -344,16 +343,16 @@ public Void run() { }); // Verify if AggregatedLogDeletionService#refreshLogRetentionSettings was // called with login UGI, instead of the UGI command was run with. - verify(loginUGI).doAs(any(PrivilegedExceptionAction.class)); + verify(loginUGI).callAs(any(Callable.class)); verify(alds).refreshLogRetentionSettings(); // Reset for refresh job retention settings reset(loginUGI); // Run refresh job retention settings with test user - ugi.doAs(new PrivilegedAction() { + ugi.callAsNoException(new Callable() { @Override - public Void run() { + public Void call() { String[] args = new String[1]; args[0] = "-refreshJobRetentionSettings"; try { @@ -366,7 +365,7 @@ public Void run() { }); // Verify if JobHistory#refreshJobRetentionSettings was called with // login UGI, instead of the UGI command was run with. 
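
[Editor's note] For mocked UGIs the migration shows up in the verification targets as well: the assertions just below now expect callAs(any(Callable.class)) where they previously expected doAs(any(PrivilegedExceptionAction.class)). A hedged sketch of the companion stubbing idiom, for tests that need the mocked UGI to actually execute the Callable it receives (the helper is illustrative, not part of this patch):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.Callable;
import org.apache.hadoop.security.UserGroupInformation;

// Illustrative only: make a mocked UGI run whatever Callable it is handed,
// so the code under test behaves as if callAs executed inline.
final class CallAsStubbing {
  @SuppressWarnings("unchecked")
  static UserGroupInformation passThroughUgi() throws Exception {
    UserGroupInformation ugi = mock(UserGroupInformation.class);
    when(ugi.callAs(any(Callable.class)))
        .thenAnswer(inv -> ((Callable<Object>) inv.getArgument(0)).call());
    return ugi;
  }
}
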
- verify(loginUGI).doAs(any(PrivilegedExceptionAction.class)); + verify(loginUGI).callAs(any(Callable.class)); verify(jobHistoryService).refreshJobRetentionSettings(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java index f4babf96703c1..5afed718e6b6b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java @@ -19,9 +19,10 @@ package org.apache.hadoop.mapred; import java.io.IOException; -import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; + import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.RPC; @@ -89,9 +90,9 @@ protected MRClientProtocol instantiateHistoryProxy() final YarnRPC rpc = YarnRPC.create(conf); LOG.debug("Connected to HistoryServer at: " + serviceAddr); UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); - return currentUser.doAs(new PrivilegedAction() { + return currentUser.callAsNoException(new Callable() { @Override - public MRClientProtocol run() { + public MRClientProtocol call() { return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, NetUtils.createSocketAddr(serviceAddr), conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index d37209a167afd..6c4e58d637e35 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -22,10 +22,10 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; import java.util.EnumSet; import java.util.HashMap; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.lang3.StringUtils; @@ -201,9 +201,9 @@ private MRClientProtocol getProxy() throws IOException { } LOG.debug("Connecting to " + serviceAddr); final InetSocketAddress finalServiceAddr = serviceAddr; - realProxy = newUgi.doAs(new PrivilegedExceptionAction() { + realProxy = newUgi.callAs(new Callable() { @Override - public MRClientProtocol run() throws IOException { + public MRClientProtocol call() throws IOException { return instantiateAMProxy(finalServiceAddr); } }); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java index 7f26bb33e8179..2144d275aae6a 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -110,7 +111,7 @@ public static void doJobControlTest() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new HadoopThread(theControl); theController.start(); while (!theControl.allFinished()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java index 92a5868a56b9a..efa4c6d325670 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,7 +116,7 @@ public void testLocalJobControlDataCopy() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new HadoopThread(theControl); theController.start(); while (!theControl.allFinished()) { LOG.debug("Jobs in waiting state: " + theControl.getWaitingJobs().size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java index 46ac5cacae62a..1f1c8de5e7446 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java @@ -35,6 +35,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; @@ -69,12 +70,15 @@ import org.apache.hadoop.util.Progressable; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import 
 import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Timeout.ThreadMode.SEPARATE_THREAD;
 
+@Timeout(value=1, unit = TimeUnit.MINUTES, threadMode = SEPARATE_THREAD)
 public class TestPipeApplication {
   private static File workSpace = new File("target",
       TestPipeApplication.class.getName() + "-workSpace");
@@ -515,10 +519,6 @@ private static class SocketCleaner extends PingSocketCleaner {
     }
 
     @Override
-    public void run() {
-      super.run();
-    }
-
     protected void closeSocketInternal(Socket clientSocket) {
       if (!clientSocket.isClosed()) {
         closeSocketCount++;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
index 1bae2b0fe2c73..f9ef635183521 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MapReduceTestUtil;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
@@ -112,7 +113,7 @@ private JobControl createDependencies(Configuration conf, Job job1)
     theControl.addJob(cjob2);
     theControl.addJob(cjob3);
     theControl.addJob(cjob4);
-    Thread theController = new Thread(theControl);
+    Thread theController = new HadoopThread(theControl);
     theController.start();
     return theControl;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
index 0b423797e5c82..4b48e50a876ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
@@ -30,6 +30,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.Test;
 
 /**
@@ -148,7 +149,7 @@ private ControlledJob createFailedControlledJob(JobControl jobControl,
   }
 
   private void runJobControl(JobControl jobControl) {
-    Thread controller = new Thread(jobControl);
+    Thread controller = new HadoopThread(jobControl);
     controller.start();
     waitTillAllFinished(jobControl);
   }
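For reference, the class-level @Timeout added to TestPipeApplication above is plain JUnit 5: the annotation applies to every test method in the class, and ThreadMode.SEPARATE_THREAD (available since JUnit 5.9) runs each test in its own thread so a wedged test can be abandoned when the limit expires. A standalone usage sketch:

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;
    import static org.junit.jupiter.api.Timeout.ThreadMode.SEPARATE_THREAD;

    // Every @Test in this class fails if it runs longer than one minute.
    @Timeout(value = 1, unit = TimeUnit.MINUTES, threadMode = SEPARATE_THREAD)
    class TimeoutUsageExample {
      @Test
      void completesWellUnderTheLimit() throws InterruptedException {
        TimeUnit.MILLISECONDS.sleep(10); // far below the one-minute budget
      }
    }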
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index 356b8aad8cade..1b9ef79b59c4b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -23,8 +23,7 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
 
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.test.LambdaTestUtils;
@@ -218,9 +217,9 @@ private Token getDelegationToken(
     // Get the delegation token directly as it is a little difficult to setup
     // the kerberos based rpc.
     Token token = loggedInUser
-        .doAs(new PrivilegedExceptionAction<Token>() {
+        .callAs(new Callable<Token>() {
           @Override
-          public Token run() throws IOException {
+          public Token call() throws IOException {
             GetDelegationTokenRequest request = Records
                 .newRecord(GetDelegationTokenRequest.class);
             request.setRenewer(renewerString);
@@ -234,10 +233,10 @@ public Token run() throws IOException {
   private long renewDelegationToken(final UserGroupInformation loggedInUser,
       final MRClientProtocol hsService, final Token dToken)
       throws IOException, InterruptedException {
-    long nextExpTime = loggedInUser.doAs(new PrivilegedExceptionAction<Long>() {
+    long nextExpTime = loggedInUser.callAs(new Callable<Long>() {
       @Override
-      public Long run() throws IOException {
+      public Long call() throws IOException {
         RenewDelegationTokenRequest request = Records
             .newRecord(RenewDelegationTokenRequest.class);
         request.setDelegationToken(dToken);
@@ -251,9 +250,9 @@ private void cancelDelegationToken(final UserGroupInformation loggedInUser,
       final MRClientProtocol hsService, final Token dToken)
       throws IOException, InterruptedException {
 
-    loggedInUser.doAs(new PrivilegedExceptionAction<Void>() {
+    loggedInUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws IOException {
+      public Void call() throws IOException {
         CancelDelegationTokenRequest request = Records
             .newRecord(CancelDelegationTokenRequest.class);
         request.setDelegationToken(dToken);
@@ -270,10 +269,10 @@ private MRClientProtocol getMRClientProtocol(Token token,
     final YarnRPC rpc = YarnRPC.create(conf);
 
     MRClientProtocol hsWithDT = ugi
-        .doAs(new PrivilegedAction<MRClientProtocol>() {
+        .callAsNoException(new Callable<MRClientProtocol>() {
          @Override
-          public MRClientProtocol run() {
+          public MRClientProtocol call() {
            return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
                 hsAddress, conf);
           }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java
index 6a437b123c8ba..33a0088454fb5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.mapred.Task.TaskReporter;
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,7 +86,7 @@ public synchronized void start() {
       // init counters used by native side,
       // so they will have correct display name
       initUsedCounters();
-      checker = new Thread(this);
+      checker = new HadoopThread(this);
       checker.setDaemon(true);
       checker.start();
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
index 3ce6936c3d7dc..8d8ee453aa58e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.util.IndexedSortable;
 import org.apache.hadoop.util.QuickSort;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.util.functional.FutureIO;
 
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY;
@@ -145,11 +146,11 @@ public static void writePartitionFile(final JobContext job,
     for(int i=0; i < samples; ++i) {
       final int idx = i;
       samplerReader[i] =
-        new Thread (threadGroup,"Sampler Reader " + idx) {
+        new HadoopThread (threadGroup, "Sampler Reader " + idx) {
         {
           setDaemon(true);
         }
-        public void run() {
+        public void work() {
           long records = 0;
           try {
             TaskAttemptContext context = new TaskAttemptContextImpl(
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
index 4191d21687806..407ff4f13d602 100644
--- a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
+++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.File;
-import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
 
 /**
  * This is a child program designed to be used by the {@link HadoopArchiveLogs}
@@ -120,9 +120,9 @@ public int run(String[] args) throws Exception {
           "impersonate " + user);
       UserGroupInformation proxyUser = UserGroupInformation
           .createProxyUser(user, loginUser);
-      exitCode = proxyUser.doAs(new PrivilegedExceptionAction<Integer>() {
+      exitCode = proxyUser.callAs(new Callable<Integer>() {
        @Override
-        public Integer run() throws Exception {
+        public Integer call() throws Exception {
          return runInternal();
         }
       });
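The HadoopThread migrations in this patch follow one rule: construction sites swap java.lang.Thread for org.apache.hadoop.util.concurrent.HadoopThread, and subclasses override work() instead of run() (see TeraInputFormat above, and StreamPrinter, AuditReplayThread, and Rooster below). HadoopThread itself is not part of this diff; one plausible shape, assuming run() is reserved by the base class so it can wrap every thread body:

    // Illustrative only; the real org.apache.hadoop.util.concurrent.HadoopThread
    // may differ. The visible contract is: pass a Runnable, or override work().
    public class HadoopThreadShape extends Thread {
      public HadoopThreadShape(Runnable target) { super(target); }
      public HadoopThreadShape(Runnable target, String name) { super(target, name); }
      public HadoopThreadShape(ThreadGroup group, String name) { super(group, name); }

      @Override
      public final void run() {
        // Reserving run() gives the base class one place to add per-thread
        // setup/teardown (e.g. context propagation) around every body.
        work();
      }

      // Subclasses put the thread body here instead of overriding run().
      public void work() {
        super.run(); // default: execute the Runnable passed at construction
      }
    }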
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index 7222e923e963b..6d7457894cec6 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -24,6 +24,7 @@
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.time.Duration;
+import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.jupiter.api.Test;
@@ -555,9 +556,9 @@ public void testUsernameFromUGI() throws Throwable {
         UserGroupInformation.createUserForTesting(alice,
             new String[]{"users", "administrators"});
     conf = new Configuration();
-    fs = fakeUser.doAs(new PrivilegedExceptionAction<S3AFileSystem>() {
+    fs = fakeUser.callAs(new Callable<S3AFileSystem>() {
       @Override
-      public S3AFileSystem run() throws Exception{
+      public S3AFileSystem call() throws Exception{
        return S3ATestUtils.createTestFileSystem(conf);
       }
     });
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java
index 12a1cd7d8f63e..18bb0944792ab 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.fs.statistics.IOStatisticsContext;
 import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextImpl;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.util.functional.CloseableTaskPoolSubmitter;
 import org.apache.hadoop.util.functional.TaskPool;
 
@@ -457,7 +458,7 @@ public void testListingThroughTaskPool() throws Throwable {
    * If constructed with an IOStatisticsContext then
    * that context is switched to before performing the IO.
    */
-  private class TestWorkerThread extends Thread implements Runnable {
+  private class TestWorkerThread extends HadoopThread implements Runnable {
     private final Path workerThreadPath;
     private final IOStatisticsContext ioStatisticsContext;
 
@@ -475,7 +476,7 @@ private class TestWorkerThread extends Thread implements Runnable {
     }
 
     @Override
-    public void run() {
+    public void work() {
       // Setting the worker thread's name.
       Thread.currentThread().setName("worker thread");
       S3AFileSystem fs = getFileSystem();
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java
index 1a4d354d5edc8..898be471a7665 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java
@@ -19,12 +19,12 @@
 package org.apache.hadoop.fs.s3a.auth;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.junit.jupiter.api.AfterEach;
@@ -165,7 +165,7 @@ private S3AFileSystem runStoreOperationsAndVerify(UserGroupInformation ugi,
       Path finalPath, String identifier)
       throws IOException, InterruptedException {
     Configuration conf = createTestConfig(identifier);
-    return ugi.doAs((PrivilegedExceptionAction<S3AFileSystem>) () -> {
+    return ugi.callAs((Callable<S3AFileSystem>) () -> {
       int instantiationCount = CustomSigner.getInstantiationCount();
       int invocationCount = CustomSigner.getInvocationCount();
       S3AFileSystem fs = (S3AFileSystem)finalPath.getFileSystem(conf);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java
index 1a60c012ba7c8..0ea2ecfa60bb6 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs.s3a.auth;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
 
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -128,7 +128,7 @@ private S3AFileSystem runStoreOperationsAndVerify(UserGroupInformation ugi,
       Path finalPath, String identifier)
       throws IOException, InterruptedException {
     Configuration conf = createTestConfig(identifier);
-    return ugi.doAs((PrivilegedExceptionAction<S3AFileSystem>) () -> {
+    return ugi.callAs((Callable<S3AFileSystem>) () -> {
       S3AFileSystem fs = (S3AFileSystem)finalPath.getFileSystem(conf);
 
       fs.mkdirs(finalPath);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
index 985ab00aa262e..f8e417c8a4f8f 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
@@ -20,10 +20,10 @@
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.Callable;
 
 import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
 import software.amazon.awssdk.core.signer.Signer;
@@ -272,7 +272,7 @@ public void testSignerInitializerMultipleInstances()
 
   private void attemptSignAndVerify(String identifier, String bucket,
       UserGroupInformation ugi, boolean expectNullStoreInfo) throws IOException,
      InterruptedException {
-    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
+    ugi.callAs((Callable<Void>) () -> {
       Signer signer = new SignerForInitializerTest();
       SdkHttpFullRequest signableRequest = constructSignableRequest(bucket);
       signer.sign(signableRequest, null);
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java
index 7cd1821c3da50..b3c9c800470fa 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java
@@ -27,6 +27,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -256,7 +257,7 @@ public AzureFileSystemThreadFactory(String prefix) {
 
     @Override
     public Thread newThread(Runnable r) {
-      Thread t = new Thread(r);
+      Thread t = new HadoopThread(r);
 
       // Use current thread name as part in naming thread such that use of
       // same file system object will have unique names.
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
index 4c8d5fb6a5f71..553ee4a969eb9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.fs.impl.StoreImplementationUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FSExceptionMessages;
@@ -821,7 +822,7 @@ class UploaderThreadFactory implements ThreadFactory {
 
     @Override
     public Thread newThread(Runnable r) {
-      Thread t = new Thread(r);
+      Thread t = new HadoopThread(r);
       t.setName(String.format("%s-%d", THREAD_ID_PREFIX,
           threadSequenceNumber.getAndIncrement()));
       return t;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
index f4ec1721ec490..d59bd687e6b53 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
@@ -44,6 +44,7 @@
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.List;
+import java.util.concurrent.Callable;
 
 /**
  * Helper class the has constants and helper methods
@@ -128,8 +129,8 @@ public String makeRemoteRequest(final String[] urls,
     }
     String s = null;
     try {
-      s = connectUgi.doAs(new PrivilegedExceptionAction<String>() {
-        @Override public String run() throws Exception {
+      s = connectUgi.callAs(new Callable<String>() {
+        @Override public String call() throws Exception {
          return retryableRequest(urls, path, queryParams, httpMethod);
         }
       });
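Both Azure ThreadFactory hunks above route every pool thread through a single newThread method, which is why the Thread-to-HadoopThread swap is a one-line change per factory. The shape, reduced to a self-contained sketch (names are illustrative, not from this patch):

    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    final class NamedDaemonThreadFactory implements ThreadFactory {
      private final String prefix;
      private final AtomicInteger seq = new AtomicInteger(0);

      NamedDaemonThreadFactory(String prefix) { this.prefix = prefix; }

      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r); // single choke point for the thread class
        t.setName(prefix + "-" + seq.getAndIncrement()); // stable names for logs
        t.setDaemon(true); // pool threads should not block JVM exit
        return t;
      }
    }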
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 989c3ba6d9340..19c67006c3cbf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs.azure;
 
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
-
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.classification.VisibleForTesting;
 
 import com.microsoft.azure.storage.AccessCondition;
@@ -105,7 +105,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper, boolean throwIfPresent)
         }
       }
     }
-    renewer = new Thread(new Renewer());
+    renewer = new HadoopThread(new Renewer());
 
     // A Renewer running should not keep JVM from exiting, so make it a daemon.
     renewer.setDaemon(true);
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java
index d3fe4aefeb050..258f8b0e8fc64 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java
@@ -22,6 +22,8 @@
 import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.SubjectUtil;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 /**
  * Internal implementation class to help calculate the current bytes
@@ -67,7 +69,7 @@ public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation,
     this.windowSizeMs = windowSizeMs;
     this.instrumentation = instrumentation;
     if (!manualUpdateTrigger) {
-      uploadBandwidthUpdater = new Thread(new UploadBandwidthUpdater(), THREAD_NAME);
+      uploadBandwidthUpdater = new HadoopThread(new UploadBandwidthUpdater(), THREAD_NAME);
       uploadBandwidthUpdater.setDaemon(true);
       uploadBandwidthUpdater.start();
     }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java
index 74f5aa4ffb573..56f2a2ba9c6ef 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema;
 import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH;
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_LIST_MAX_RESULTS;
@@ -151,7 +152,7 @@ public boolean listRecursiveAndTakeAction()
     Thread producerThread = null;
     try {
       ListBlobQueue listBlobQueue = createListBlobQueue(configuration);
-      producerThread = new Thread(() -> {
+      producerThread = new HadoopThread(() -> {
         try {
           produceConsumableList(listBlobQueue);
         } catch (AzureBlobFileSystemException e) {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
index f28a15fd7149f..75e9e1dfc806c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java
@@ -30,6 +30,8 @@
 import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.util.SubjectUtil;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 /**
  * Handle OOB IO into a shared container.
@@ -74,7 +76,7 @@ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) {
      * Start writing blocks to Azure storage.
      */
     public void startWriting() {
-      runner = new Thread(this); // Create the block writer thread.
+      runner = new HadoopThread(this); // Create the block writer thread.
       runner.start(); // Start the block writer thread.
     }
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
index ab175ba6c5c15..a1fcf43972f97 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
 
@@ -94,7 +95,7 @@ public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
     Path testFilePath1 = new Path(base, "test1.dat");
     Path renamePath = new Path(base, "test2.dat");
     getInputStreamToTest(fs, testFilePath1);
-    Thread renameThread = new Thread(
+    Thread renameThread = new HadoopThread(
         new RenameThread(fs, testFilePath1, renamePath));
     renameThread.start();
 
@@ -121,7 +122,7 @@ public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
     Path renamePath = new Path(base, "test2.dat");
     getInputStreamToTest(fs, testFilePath1);
 
-    Thread renameThread = new Thread(
+    Thread renameThread = new HadoopThread(
         new RenameThread(fs, testFilePath1, renamePath));
     renameThread.start();
 
@@ -142,7 +143,7 @@ public void testMultiThreadedPageBlobSetPermissionScenario()
       createEmptyFile(
           getPageBlobTestStorageAccount(),
           testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
       while (t.isAlive()) {
         fs.setPermission(testPath,
@@ -161,7 +162,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario()
       throws Throwable {
     assertThrows(FileNotFoundException.class, () -> {
       createEmptyFile(createTestAccount(), testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
       while (t.isAlive()) {
         fs.setPermission(testPath,
@@ -179,7 +180,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario()
   public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
     assertThrows(FileNotFoundException.class, () -> {
       createEmptyFile(createTestAccount(), testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
       while (t.isAlive()) {
         inputStream = fs.open(testPath);
@@ -200,7 +201,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
       createEmptyFile(
           getPageBlobTestStorageAccount(),
           testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
 
       while (t.isAlive()) {
@@ -219,7 +220,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
   public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
     assertThrows(FileNotFoundException.class, () -> {
       createEmptyFile(createTestAccount(), testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
       while (t.isAlive()) {
         fs.setOwner(testPath, "testowner", "testgroup");
@@ -237,7 +238,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
       createEmptyFile(
           getPageBlobTestStorageAccount(),
           testPath);
-      Thread t = new Thread(new DeleteThread(fs, testPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testPath));
       t.start();
       while (t.isAlive()) {
         fs.setOwner(testPath, "testowner", "testgroup");
@@ -253,7 +254,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
   public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
     assertThrows(FileNotFoundException.class, () -> {
       createTestFolder(createTestAccount(), testFolderPath);
-      Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath));
       t.start();
       while (t.isAlive()) {
         fs.listStatus(testFolderPath);
@@ -271,7 +272,7 @@ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
       createTestFolder(
           getPageBlobTestStorageAccount(),
           testFolderPath);
-      Thread t = new Thread(new DeleteThread(fs, testFolderPath));
+      Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath));
       t.start();
       while (t.isAlive()) {
         fs.listStatus(testFolderPath);
@@ -293,7 +294,7 @@ public void testMultiThreadedPageBlobReadScenario() throws Throwable {
     Path renamePath = new Path(base, "test2.dat");
     getInputStreamToTest(fs, testFilePath1);
 
-    Thread renameThread = new Thread(
+    Thread renameThread = new HadoopThread(
         new RenameThread(fs, testFilePath1, renamePath));
     renameThread.start();
 
@@ -318,7 +319,7 @@ public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
     Path renamePath = new Path(base, "test2.dat");
     getInputStreamToTest(fs, testFilePath1);
 
-    Thread renameThread = new Thread(
+    Thread renameThread = new HadoopThread(
         new RenameThread(fs, testFilePath1, renamePath));
     renameThread.start();
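The ...MultiThreaded scenarios above all share one pattern: start a thread that deletes (or renames) the target, then keep reissuing the operation under test until the expected FileNotFoundException escapes. A distilled, Hadoop-free version of the same race using java.io.File (illustrative only):

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.io.FileReader;

    class DeleteRaceSketch {
      public static void main(String[] args) throws Exception {
        File f = File.createTempFile("race", ".dat");
        Thread deleter = new Thread(() -> f.delete());
        deleter.start();
        try {
          // Keep reopening the file until the concurrent delete wins.
          while (true) {
            try (FileReader r = new FileReader(f)) {
              r.read();
            }
          }
        } catch (FileNotFoundException expected) {
          System.out.println("delete raced ahead, as the tests expect");
        }
        deleter.join();
      }
    }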
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
index 918866a73e5d7..92f3ad4388192 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
@@ -29,6 +29,7 @@
 
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -67,7 +68,7 @@ public void testMultipleRenameFileOperationsToSameDestination()
     for (int i = 0; i < 10; i++) {
       final int threadNumber = i;
       Path src = path("test" + threadNumber);
-      threads.add(new Thread(() -> {
+      threads.add(new HadoopThread(() -> {
         try {
           latch.await(Long.MAX_VALUE, TimeUnit.SECONDS);
         } catch (InterruptedException e) {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index ecf6e17b82aa7..415a612f4bcb1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
@@ -1643,9 +1644,9 @@ public void testLeaseAsDistributedLock() throws IllegalArgumentException,
     NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
     String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(LEASE_LOCK_FILE_KEY)));
 
-    Thread first = new Thread(new LeaseLockAction("first-thread", fullKey));
+    Thread first = new HadoopThread(new LeaseLockAction("first-thread", fullKey));
     first.start();
-    Thread second = new Thread(new LeaseLockAction("second-thread", fullKey));
+    Thread second = new HadoopThread(new LeaseLockAction("second-thread", fullKey));
     second.start();
     try {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index 6cc6903d4930d..9f8f89e991947 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -653,9 +653,9 @@ public void testRenameWithStickyBitNegative() throws Throwable {
     UserGroupInformation dummyUser = UserGroupInformation.createUserForTesting(
         "dummyUser", new String[] {"dummygroup"});
 
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         // Add auth rules for dummyuser
         authorizer.addAuthRule(parentSrcDir.toString(),
             WRITE, getCurrentUserShortName(), true);
@@ -710,9 +710,9 @@ public void testRenameOnNonExistentSourceWithStickyBit() throws Throwable {
     UserGroupInformation dummyUser = UserGroupInformation.createUserForTesting(
         "dummyUser", new String[] {"dummygroup"});
 
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         // Add auth rules for dummyuser
         authorizer.addAuthRule(parentSrcDir.toString(),
             WRITE, getCurrentUserShortName(), true);
@@ -1005,9 +1005,9 @@ public void testSingleFileDeleteWithStickyBitNegative() throws Throwable {
     UserGroupInformation dummyUser =
        UserGroupInformation.createUserForTesting(
             "dummyUser", new String[] {"dummygroup"});
 
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         try {
           authorizer.addAuthRule(parentDir.toString(), WRITE,
               getCurrentUserShortName(), true);
@@ -1095,9 +1095,9 @@ public void testRecursiveDeleteFailsWithStickybit() throws Throwable {
     UserGroupInformation dummyUser = UserGroupInformation.createUserForTesting(
         "dummyUser", new String[] {"dummygroup"});
 
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         // Add auth rules for dummyuser
         authorizer.addAuthRule("/", WRITE, getCurrentUserShortName(), true);
         authorizer.addAuthRule("/testRecursiveDeleteFailsWithStickybit*",
@@ -1145,9 +1145,9 @@ public void testDeleteSucceedsForOnlyFilesOwnedByUserWithStickybitSet()
     UserGroupInformation dummyUser = UserGroupInformation.createUserForTesting(
         "dummyuser", new String[] {"dummygroup"});
 
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         authorizer.addAuthRule("/", WRITE, getCurrentUserShortName(), true);
         authorizer.addAuthRule("/testDeleteSucceedsForOnlyFilesOwnedByUserWithStickybitSet*",
             WRITE, getCurrentUserShortName(), true);
@@ -1199,9 +1199,9 @@ public void testDeleteSucceedsForParentDirectoryOwnerUserWithStickybit() throws
     // create child with owner as dummyUser
     UserGroupInformation dummyUser = UserGroupInformation.createUserForTesting(
         "user1", new String[] {"dummygroup"});
-    dummyUser.doAs(new PrivilegedExceptionAction<Void>() {
+    dummyUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         authorizer.addAuthRule(parentDir.toString(), WRITE,
             getCurrentUserShortName(), true);
         fs.create(testFilePath);
         ContractTestUtils.assertPathExists(fs, "file was not created", testFilePath);
@@ -1456,9 +1456,9 @@ public void testOwnerPermissionNegative() throws Throwable {
 
     UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
         "testuser", new String[] {});
 
-    ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
+    ugiSuperUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         fs.mkdirs(childDir);
         return null;
       }
@@ -1503,9 +1503,9 @@ public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
     ContractTestUtils.assertPathExists(fs, "test path does not exist", testPath);
     owner = fs.getFileStatus(testPath).getOwner();
 
-    unauthorisedUser.doAs(new PrivilegedExceptionAction<Void>() {
+    unauthorisedUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         try {
           fs.setOwner(testPath, "newowner", null);
           fail("Failing test because setOwner call was expected to throw");
@@ -1548,9 +1548,9 @@ public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
         .as("changing owner requires original and new owner to be different")
         .isNotEqualToIgnoringCase(newOwner);
 
-    authorisedUser.doAs(new PrivilegedExceptionAction<Void>() {
+    authorisedUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         fs.setOwner(testPath, newOwner, newGroup);
         assertOwnerEquals(testPath, newOwner);
         assertEquals(newGroup,
            fs.getFileStatus(testPath).getGroup());
@@ -1592,9 +1592,9 @@ public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throw
         .as("changing owner requires original and new owner to be different")
         .isNotEqualToIgnoringCase(newOwner);
 
-    user.doAs(new PrivilegedExceptionAction<Void>() {
+    user.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         fs.setOwner(testPath, newOwner, newGroup);
         assertOwnerEquals(testPath, newOwner);
         assertEquals(newGroup, fs.getFileStatus(testPath).getGroup());
@@ -1630,9 +1630,9 @@ public void testSetOwnerFailsForIllegalSetup() throws Throwable {
 
     final String owner = fs.getFileStatus(testPath).getOwner();
 
-    user.doAs(new PrivilegedExceptionAction<Void>() {
+    user.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         try {
           fs.setOwner(testPath, "newowner", null);
           fail("Failing test because setOwner call was expected to throw");
@@ -2028,9 +2028,9 @@ private void executeSetPermissionFailure(UserGroupInformation testUser,
       Path testPath, FsPermission oldPermission, FsPermission newPermission,
       boolean isInvalidSetup) throws Throwable {
-    testUser.doAs(new PrivilegedExceptionAction<Void>() {
+    testUser.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         try {
           //READ access required for getFileStatus
           fs.setPermission(testPath, newPermission);
@@ -2068,9 +2068,9 @@ private void executeSetPermissionSuccess(UserGroupInformation testUser,
       throws Throwable {
     //If user is given, then use doAs
     if (testUser != null) {
-      testUser.doAs(new PrivilegedExceptionAction<Void>() {
+      testUser.callAs(new Callable<Void>() {
        @Override
-        public Void run() throws Exception {
+        public Void call() throws Exception {
          fs.setPermission(testPath, newPermission);
           return null;
         }
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
index f801f5e9ddae9..8e2feeb64448f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
@@ -149,7 +150,7 @@ public void testMultiThreadedOperation() throws Exception {
     final ConcurrentLinkedQueue<Throwable> exceptionsEncountered = new ConcurrentLinkedQueue<Throwable>();
     for (int i = 0; i < numThreads; i++) {
       final Path threadLocalFile = new Path("/myFile" + i);
-      threads[i] = new Thread(new Runnable() {
+      threads[i] = new HadoopThread(new Runnable() {
         @Override
         public void run() {
           try {
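testMultiThreadedOperation above never lets a worker thread throw across the thread boundary; failures are pushed onto a ConcurrentLinkedQueue and asserted by the main thread after join(). The same pattern in a self-contained form:

    import java.util.concurrent.ConcurrentLinkedQueue;

    class WorkerFailureCollector {
      public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedQueue<Throwable> failures = new ConcurrentLinkedQueue<>();
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
          final int id = i;
          workers[i] = new Thread(() -> {
            try {
              if (id == 2) { // simulate one failing worker
                throw new IllegalStateException("worker " + id + " failed");
              }
            } catch (Throwable t) {
              failures.add(t); // queue is safe to mutate from any thread
            }
          });
          workers[i].start();
        }
        for (Thread w : workers) {
          w.join();
        }
        if (!failures.isEmpty()) {
          throw new AssertionError(failures.size() + " worker(s) failed",
              failures.peek());
        }
      }
    }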
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java
index 187aa02cceb93..efff0c79c6b62 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java
@@ -24,6 +24,7 @@
 import java.util.Date;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.Test;
 
 public class TestBandwidthGaugeUpdater {
@@ -56,7 +57,7 @@ public void testMultiThreaded() throws Exception {
         new BandwidthGaugeUpdater(instrumentation, 1000, true);
     Thread[] threads = new Thread[10];
     for (int i = 0; i < threads.length; i++) {
-      threads[i] = new Thread(new Runnable() {
+      threads[i] = new HadoopThread(new Runnable() {
         @Override
         public void run() {
           updater.blockDownloaded(new Date(), new Date(), 10);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
index 0f7e6d9009b8a..3be5c555eb24d 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.fs.store.BlockUploadStatistics;
 import org.apache.hadoop.fs.store.DataBlocks;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 
 import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
 import static java.net.HttpURLConnection.HTTP_OK;
@@ -1243,7 +1244,7 @@ public void testFlushSuccessWithConnectionResetOnResponseInvalidMd5() throws Exc
     out1.write(bytes1);
 
     //parallel flush call should lead to the first call failing because of md5 mismatch.
-    Thread parallelFlushThread = new Thread(() -> {
+    Thread parallelFlushThread = new HadoopThread(() -> {
       try {
         out1.hsync();
       } catch (IOException e) {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
index e063f71e8c2da..cb87293eb6784 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
@@ -69,6 +69,7 @@
 import org.apache.hadoop.fs.statistics.IOStatisticAssertions;
 import org.apache.hadoop.fs.statistics.IOStatistics;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.util.functional.FunctionRaisingIOE;
 
 import static java.net.HttpURLConnection.HTTP_CLIENT_TIMEOUT;
@@ -1011,7 +1012,7 @@ public void testParallelRenameForAtomicRenameShouldFail() throws Exception {
          .acquireLease(Mockito.anyString(), Mockito.anyInt(),
               Mockito.nullable(String.class), Mockito.any(TracingContext.class));
 
-    new Thread(() -> {
+    new HadoopThread(() -> {
       while (!leaseAcquired.get()) {}
       try {
         fs.rename(src, dst);
@@ -1061,7 +1062,7 @@ public void testAppendAtomicBlobDuringRename() throws Exception {
       return answer.callRealMethod();
     }).when(client).copyBlob(Mockito.any(Path.class), Mockito.any(Path.class),
         Mockito.nullable(String.class), Mockito.any(TracingContext.class));
-    new Thread(() -> {
+    new HadoopThread(() -> {
       while (!copyInProgress.get()) {}
       try {
         os.write(1);
diff --git a/hadoop-tools/hadoop-compat-bench/pom.xml b/hadoop-tools/hadoop-compat-bench/pom.xml
index b6ef232850e9e..20ab74b455b80 100644
--- a/hadoop-tools/hadoop-compat-bench/pom.xml
+++ b/hadoop-tools/hadoop-compat-bench/pom.xml
@@ -64,6 +64,14 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+
+    <dependency>
+      <groupId>com.sun.xml.bind</groupId>
+      <artifactId>jaxb-impl</artifactId>
+      <version>2.3.9</version>
+      <scope>test</scope>
+    </dependency>
+
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-inline</artifactId>
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatTool.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatTool.java
index 94167336f168f..038cd8331b155 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatTool.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatTool.java
@@ -25,8 +25,8 @@
 import java.io.OutputStreamWriter;
 import java.io.PrintStream;
 import java.nio.charset.StandardCharsets;
-import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.concurrent.Callable;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -83,10 +83,10 @@ public HdfsCompatTool(Configuration conf, PrintStream out, PrintStream err) {
   @Override
   public int run(final String[] args) throws Exception {
     try {
-      return UserGroupInformation.getCurrentUser().doAs(
-          new PrivilegedExceptionAction<Integer>() {
+      return UserGroupInformation.getCurrentUser().callAs(
+          new Callable<Integer>() {
             @Override
-            public Integer run() {
+            public Integer call() {
               return runImpl(args);
             }
           });
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
index a76f95fb8d735..c4b27e66aa8b8 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
@@ -27,8 +27,8 @@
 import org.junit.Assert;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.Random;
+import java.util.concurrent.Callable;
 
 @HdfsCompatCaseGroup(name = "File")
 public class HdfsCompatFile extends AbstractHdfsCompatCase {
@@ -125,8 +125,8 @@ public void setOwner() throws Exception {
     final String owner = "test_" + RANDOM.nextInt(1024);
     final String group = "test_" + RANDOM.nextInt(1024);
     final String privileged = getPrivilegedUser();
-    UserGroupInformation.createRemoteUser(privileged).doAs(
-        (PrivilegedExceptionAction<Void>) () -> {
+    UserGroupInformation.createRemoteUser(privileged).callAs(
+        (Callable<Void>) () -> {
          FileSystem.newInstance(fs().getUri(), fs().getConf())
               .setOwner(file, owner, group);
           return null;
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
index 6b6596c38d821..ba6141fa6a032 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -367,7 +368,7 @@ private List<String> readLines(File file) throws IOException {
     return lines;
   }
 
-  private static final class StreamPrinter extends Thread {
+  private static final class StreamPrinter extends HadoopThread {
     private final InputStream in;
     private final List<String> lines;
@@ -377,7 +378,7 @@ private StreamPrinter(InputStream in) {
     }
 
     @Override
-    public void run() {
+    public void work() {
       try (BufferedReader br = new BufferedReader(
           new InputStreamReader(in, StandardCharsets.UTF_8))) {
         String line = br.readLine();
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
index 0afb94563af5e..43f2dc942a181 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
@@ -23,11 +23,11 @@
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
-import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.Callable;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -587,10 +587,10 @@ public void testPreserve() {
       final CopyMapper copyMapper = new CopyMapper();
 
       final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
-          tmpUser.doAs(
-              new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
+          tmpUser.callAsNoException(
+              new Callable<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
                 @Override
-                public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
+                public Mapper<Text, CopyListingFileStatus, Text, Text>.Context call() {
                   try {
                     StubContext stubContext =
                         new StubContext(getConfiguration(), null, 0);
                     return stubContext.getContext();
@@ -613,9 +613,9 @@ public Mapper.Context run() {
       mkdirs(TARGET_PATH);
       cluster.getFileSystem().setPermission(new Path(TARGET_PATH),
           new FsPermission((short)511));
-      final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
+      final FileSystem tmpFS = tmpUser.callAsNoException(new Callable<FileSystem>() {
         @Override
-        public FileSystem run() {
+        public FileSystem call() {
           try {
             return FileSystem.get(cluster.getConfiguration(0));
           } catch (IOException e) {
@@ -626,9 +626,9 @@ public FileSystem run() {
         }
       });
 
-      tmpUser.doAs(new PrivilegedAction<Integer>() {
+      tmpUser.callAsNoException(new Callable<Integer>() {
         @Override
-        public Integer run() {
+        public Integer call() {
           try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
@@ -662,10 +662,10 @@ public void testCopyReadableFiles() {
       final CopyMapper copyMapper = new CopyMapper();
 
      final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
-          tmpUser.doAs(
-              new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
+          tmpUser.callAsNoException(
+              new Callable<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
                 @Override
-                public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
+                public Mapper<Text, CopyListingFileStatus, Text, Text>.Context call() {
                   try {
                     StubContext stubContext =
                         new StubContext(getConfiguration(), null, 0);
                     return stubContext.getContext();
@@ -683,9 +683,9 @@ public Mapper.Context run() {
       cluster.getFileSystem().setPermission(new Path(TARGET_PATH),
           new FsPermission((short)511));
 
-      final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
+      final FileSystem tmpFS = tmpUser.callAsNoException(new Callable<FileSystem>() {
         @Override
-        public FileSystem run() {
+        public FileSystem call() {
           try {
             return FileSystem.get(cluster.getConfiguration(0));
           } catch (IOException e) {
@@ -696,9 +696,9 @@ public FileSystem run() {
         }
       });
 
-      tmpUser.doAs(new PrivilegedAction<Integer>() {
+      tmpUser.callAsNoException(new Callable<Integer>() {
        @Override
-        public Integer run() {
+        public Integer call() {
          try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
@@ -729,9 +729,9 @@ public void testSkipCopyNoPerms() {
       final CopyMapper copyMapper = new CopyMapper();
 
       final StubContext stubContext = tmpUser.
-          doAs(new PrivilegedAction<StubContext>() {
+          callAsNoException(new Callable<StubContext>() {
             @Override
-            public StubContext run() {
+            public StubContext call() {
               try {
                 return new StubContext(getConfiguration(), null, 0);
               } catch (Exception e) {
@@ -759,9 +759,9 @@ public StubContext run() {
       cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),
           new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
 
-      final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
+      final FileSystem tmpFS = tmpUser.callAsNoException(new Callable<FileSystem>() {
         @Override
-        public FileSystem run() {
+        public FileSystem call() {
           try {
             return FileSystem.get(cluster.getConfiguration(0));
           } catch (IOException e) {
@@ -772,9 +772,9 @@ public FileSystem run() {
         }
       });
 
-      tmpUser.doAs(new PrivilegedAction<Integer>() {
+      tmpUser.callAsNoException(new Callable<Integer>() {
         @Override
-        public Integer run() {
+        public Integer call() {
           try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
@@ -809,9 +809,9 @@ public void testFailCopyWithAccessControlException() {
       final CopyMapper copyMapper = new CopyMapper();
 
       final StubContext stubContext = tmpUser.
-          doAs(new PrivilegedAction<StubContext>() {
+          callAsNoException(new Callable<StubContext>() {
             @Override
-            public StubContext run() {
+            public StubContext call() {
               try {
                 return new StubContext(getConfiguration(), null, 0);
               } catch (Exception e) {
@@ -841,9 +841,9 @@ public StubContext run() {
       cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),
           new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
 
-      final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
+      final FileSystem tmpFS = tmpUser.callAsNoException(new Callable<FileSystem>() {
         @Override
-        public FileSystem run() {
+        public FileSystem call() {
           try {
             return FileSystem.get(cluster.getConfiguration(0));
           } catch (IOException e) {
@@ -854,9 +854,9 @@ public FileSystem run() {
         }
       });
 
-      tmpUser.doAs(new PrivilegedAction<Integer>() {
+      tmpUser.callAsNoException(new Callable<Integer>() {
         @Override
-        public Integer run() {
+        public Integer call() {
           try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
@@ -972,11 +972,11 @@ private void doTestIgnoreFailuresDoubleWrapped(final boolean ignoreFailures) {
       final CopyMapper copyMapper = new CopyMapper();
 
       final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
-          tmpUser.doAs(new PrivilegedAction<
+          tmpUser.callAsNoException(new Callable<
              Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
             @Override
             public Mapper<Text, CopyListingFileStatus, Text, Text>.Context
-                run() {
+                call() {
               try {
                 StubContext stubContext = new StubContext(
                     getConfiguration(), null, 0);
@@ -998,9 +998,9 @@ private void doTestIgnoreFailuresDoubleWrapped(final boolean ignoreFailures) {
       context.getConfiguration().setBoolean(
           DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
 
-      final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
+      final FileSystem tmpFS = tmpUser.callAsNoException(new Callable<FileSystem>() {
         @Override
-        public FileSystem run() {
+        public FileSystem call() {
           try {
             return FileSystem.get(cluster.getConfiguration(0));
           } catch (IOException e) {
@@ -1010,9 +1010,9 @@ public FileSystem run() {
         }
       });
 
-      tmpUser.doAs(new PrivilegedAction<Integer>() {
+      tmpUser.callAsNoException(new Callable<Integer>() {
         @Override
-        public Integer run() {
+        public Integer call() {
           try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java
index 9c461cb18fb8a..ed22fc7e875a0 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.tools.util.WorkReport;
 import org.apache.hadoop.tools.util.WorkRequest;
 import org.apache.hadoop.tools.util.WorkRequestProcessor;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 
@@ -146,8 +147,8 @@ public void testMultipleProducerConsumerShutdown()
     // starts two thread: a source thread which put in work, and a sink thread
     // which takes a piece of work from ProducerConsumer
-    class SourceThread extends Thread {
-      public void run() {
+    class SourceThread extends HadoopThread {
+      public void work() {
         while (true) {
           try {
             worker.put(new WorkRequest<Integer>(42));
@@ -161,8 +162,8 @@ public void run() {
     // The source thread put requests into producer-consumer.
     SourceThread source = new SourceThread();
     source.start();
 
-    class SinkThread extends Thread {
-      public void run() {
+    class SinkThread extends HadoopThread {
+      public void work() {
         try {
           while (true) {
             WorkReport<Integer> report = worker.take();
diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java
index e44f811f0db41..63b376fd317bd 100644
--- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java
+++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Lists;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -549,7 +550,7 @@ public void onContainersAllocated(List<Container> allocatedContainers) {
           + container.getNodeHttpAddress() + ", containerResourceMemory="
           + rsrc.getMemorySize() + ", containerResourceVirtualCores="
           + rsrc.getVirtualCores());
-      Thread launchThread = new Thread(containerLauncher);
+      Thread launchThread = new HadoopThread(containerLauncher);
 
       // launch and start the container on a separate thread to keep
       // the main thread unblocked
diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java
index 0c57542747e41..d969a4908d5b3 100644
--- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java
+++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java
@@ -77,6 +77,7 @@
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
@@ -891,7 +892,7 @@ private boolean monitorInfraApplication()
throws YarnException, IOException { boolean loggedApplicationInfo = false; boolean success = false; - Thread namenodeMonitoringThread = new Thread(() -> { + Thread namenodeMonitoringThread = new HadoopThread(() -> { Supplier exitCritera = () -> Apps.isApplicationFinalState(infraAppState); Optional namenodeProperties = Optional.empty(); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java index f6c8a6ac4d58b..17742bbc6db7b 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java @@ -52,6 +52,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -319,7 +320,7 @@ static void waitForNameNodeReadiness(final Properties nameNodeProperties, .get(getNameNodeHdfsUri(nameNodeProperties), conf); log.info("Launching thread to trigger block reports for Datanodes with <" + blockThreshold + " blocks reported"); - Thread blockReportThread = new Thread(() -> { + Thread blockReportThread = new HadoopThread(() -> { // Here we count both Missing and UnderReplicated within under // replicated long lastUnderRepBlocks = Long.MAX_VALUE; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index db34037da7806..19dcdd08f8fd6 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -57,6 +57,7 @@ import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -461,7 +462,7 @@ private Client createAndStartClient(Configuration localConf) { final Client client = new Client(JarFinder.getJar(ApplicationMaster.class), JarFinder.getJar(Assertions.class)); client.setConf(localConf); - Thread appThread = new Thread(() -> { + Thread appThread = new HadoopThread(() -> { try { client.run(new String[] {"-" + Client.MASTER_MEMORY_MB_ARG, "128", "-" + Client.CONF_PATH_ARG, confZip.toString(), diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java index 14e8c9cb82f16..a3364db7647b0 100644 --- 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java @@ -21,10 +21,10 @@ import org.apache.hadoop.tools.dynamometer.workloadgenerator.WorkloadDriver; import java.io.IOException; import java.net.URI; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.DelayQueue; import java.util.concurrent.TimeUnit; @@ -43,6 +43,7 @@ import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.REPLAYCOUNTERS; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.ReplayCommand; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +58,7 @@ * are inserted by the {@link AuditReplayMapper}. Once an item is ready, this * thread will fetch the command from the queue and attempt to replay it. */ -public class AuditReplayThread extends Thread { +public class AuditReplayThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(AuditReplayThread.class); @@ -154,7 +155,7 @@ Exception getException() { } @Override - public void run() { + public void work() { long currentEpoch = System.currentTimeMillis(); long delay = startTimestampMs - currentEpoch; try { @@ -202,7 +203,7 @@ private boolean replayLog(final AuditReplayCommand command) { if (proxyFs == null) { UserGroupInformation ugi = UserGroupInformation .createProxyUser(command.getSimpleUgi(), loginUser); - proxyFs = ugi.doAs((PrivilegedAction) () -> { + proxyFs = ugi.callAsNoException((Callable) () -> { try { FileSystem fs = new DistributedFileSystem(); fs.initialize(namenodeUri, mapperConf); diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index 4d4e9a26b3de4..bf9a4c27916ee 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -36,6 +36,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -309,9 +310,9 @@ public void setJournal(BalanceJournal journal) { /** * This thread consumes the delayQueue and move the jobs to the runningQueue. */ - class Rooster extends Thread { + class Rooster extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { try { DelayWrapper dJob = delayQueue.take(); @@ -327,9 +328,9 @@ public void run() { /** * This thread consumes the runningQueue and give the job to the workers. 
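Every Thread subclass touched by this patch moves its run() body into work(). HadoopThread itself is not part of this diff, so the following is only a minimal sketch of the contract these hunks appear to assume, with the constructor set inferred from the call sites in the patch; the real org.apache.hadoop.util.concurrent.HadoopThread may differ:

// Sketch only: the real class may add logging, context propagation,
// or uncaught-exception handling inside run().
public class HadoopThread extends Thread {
  public HadoopThread() { }
  public HadoopThread(String name) { super(name); }
  public HadoopThread(Runnable target) { super(target); }
  public HadoopThread(Runnable target, String name) { super(target, name); }

  @Override
  public final void run() {
    work(); // the base class owns run(); subclasses override work() instead
  }

  // Subclasses place their former run() body here.
  public void work() {
    super.run(); // default: execute a wrapped Runnable, like plain Thread
  }
}

Under this reading, AuditReplayThread above overrides work() and leaves run() to the base class.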
*/ - class Reader extends Thread { + class Reader extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { try { final BalanceJob job = runningQueue.poll(500, TimeUnit.MILLISECONDS); @@ -361,9 +362,9 @@ public void run() { * This thread consumes the recoverQueue, recovers the job the adds it to the * runningQueue. */ - class Recover extends Thread { + class Recover extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { BalanceJob job = null; try { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java index 41e937fe3c88c..b7135be27776f 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateData.java @@ -21,11 +21,11 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.OutputStream; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.ArrayList; import java.util.List; import java.util.Random; +import java.util.concurrent.Callable; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -170,8 +170,8 @@ static DataStatistics publishPlainDataStatistics(Configuration conf, public Job call() throws IOException, InterruptedException, ClassNotFoundException { UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - ugi.doAs( new PrivilegedExceptionAction () { - public Job run() throws IOException, ClassNotFoundException, + ugi.callAs( new Callable () { + public Job call() throws IOException, ClassNotFoundException, InterruptedException { // check if compression emulation is enabled if (CompressionEmulationUtil diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java index aa191629cf109..a350cccac3a3a 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java @@ -20,10 +20,10 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.Random; +import java.util.concurrent.Callable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -107,8 +107,8 @@ public GenerateDistCacheData(Configuration conf) throws IOException { public Job call() throws IOException, InterruptedException, ClassNotFoundException { UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - ugi.doAs( new PrivilegedExceptionAction () { - public Job run() throws IOException, ClassNotFoundException, + ugi.callAs( new Callable () { + public Job call() throws IOException, ClassNotFoundException, InterruptedException { job.setMapperClass(GenDCDataMapper.class); job.setNumReduceTasks(0); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index e476223cf1e23..b9aa966ab7cb5 100644 --- 
a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -21,8 +21,8 @@ import java.io.InputStream; import java.io.PrintStream; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -43,6 +43,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.ZombieJobProducer; import org.slf4j.Logger; @@ -369,8 +370,8 @@ public int run(final String[] argv) throws IOException, InterruptedException { UserGroupInformation.setConfiguration(conf); UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - val = ugi.doAs(new PrivilegedExceptionAction() { - public Integer run() throws Exception { + val = ugi.callAs(new Callable() { + public Integer call() throws Exception { return runJob(conf, argv); } }); @@ -627,7 +628,7 @@ private int setupDistCacheEmulation(Configuration conf, String traceIn, * pipeline abort its progress, waiting for each to exit and killing * any jobs still running on the cluster. */ - class Shutdown extends Thread { + class Shutdown extends HadoopThread { static final long FAC_SLEEP = 1000; static final long SUB_SLEEP = 4000; @@ -647,7 +648,7 @@ private void killComponent(Component component, long maxwait) { } @Override - public void run() { + public void work() { LOG.info("Exiting..."); try { killComponent(factory, FAC_SLEEP); // read no more tasks diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java index 325c15c997142..cbd7405d5307c 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java @@ -26,7 +26,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; -import java.security.PrivilegedExceptionAction; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -108,8 +107,8 @@ public GridmixJob(final Configuration conf, long submissionMillis, ((StringBuilder)nameFormat.get().out()).setLength(JOB_NAME_PREFIX.length()); try { - job = this.ugi.doAs(new PrivilegedExceptionAction() { - public Job run() throws IOException { + job = this.ugi.callAs(new Callable() { + public Job call() throws IOException { String jobId = null == jobdesc.getJobID() ? 
"" @@ -341,8 +340,8 @@ protected GridmixJob(final Configuration conf, long submissionMillis, ugi = UserGroupInformation.getCurrentUser(); try { - job = this.ugi.doAs(new PrivilegedExceptionAction() { - public Job run() throws IOException { + job = this.ugi.callAs(new Callable() { + public Job call() throws IOException { Job ret = Job.getInstance(conf, name); ret.getConfiguration().setInt(GRIDMIX_JOB_SEQ, seq); setJobQueue(ret, conf.get(GRIDMIX_DEFAULT_QUEUE)); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java index 0b06911be0857..5944d08dfd7f7 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java @@ -35,6 +35,7 @@ import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobStatus; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Component accepting submitted, running {@link Statistics.JobStats} and @@ -133,14 +134,14 @@ List getRemainingJobs() { * Monitoring thread pulling running jobs from the component and into * a queue to be polled for status. */ - private class MonitorThread extends Thread { + private class MonitorThread extends HadoopThread { public MonitorThread(int i) { super("GridmixJobMonitor-" + i); } @Override - public void run() { + public void work() { boolean graceful; boolean shutdown; while (true) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java index d1229ce2d8ff4..af524fdc05295 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java @@ -41,13 +41,14 @@ import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.TaskInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.Random; +import java.util.concurrent.Callable; /** * Synthetic job generated from a trace description. @@ -64,9 +65,9 @@ public LoadJob(final Configuration conf, long submissionMillis, public Job call() throws IOException, InterruptedException, ClassNotFoundException { - ugi.doAs( - new PrivilegedExceptionAction() { - public Job run() throws IOException, ClassNotFoundException, + ugi.callAs( + new Callable() { + public Job call() throws IOException, ClassNotFoundException, InterruptedException { job.setMapperClass(LoadMapper.class); job.setReducerClass(LoadReducer.class); @@ -143,7 +144,7 @@ private void configure() { * This is a progress based resource usage matcher. 
*/ @SuppressWarnings("unchecked") - static class ResourceUsageMatcherRunner extends Thread + static class ResourceUsageMatcherRunner extends HadoopThread implements Progressive { private final ResourceUsageMatcher matcher; private final BoostingProgress progress; @@ -199,7 +200,7 @@ protected void match() throws IOException, InterruptedException { } @Override - public void run() { + public void work() { LOG.info("Resource usage matcher thread started."); try { while (progress.getProgress() < 1) { @@ -234,7 +235,7 @@ void boost(float value) { // Makes sure that the TaskTracker doesn't kill the map/reduce tasks while // they are emulating - private static class StatusReporter extends Thread { + private static class StatusReporter extends HadoopThread { private final TaskAttemptContext context; private final Progressive progress; @@ -244,7 +245,7 @@ private static class StatusReporter extends Thread { } @Override - public void run() { + public void work() { LOG.info("Status reporter thread started."); try { while (!isInterrupted() && progress.getProgress() < 1) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java index fe3b5d36d9841..7c13e9e2c3665 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,14 +65,14 @@ public Thread createReaderThread() { public void update(Statistics.ClusterStats item) { } - private class ReplayReaderThread extends Thread { + private class ReplayReaderThread extends HadoopThread { public ReplayReaderThread(String threadName) { super(threadName); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java index cb05ab63f1c07..d5201f9384206 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -59,7 +60,7 @@ public Thread createReaderThread() { return new SerialReaderThread("SerialJobFactory"); } - private class SerialReaderThread extends Thread { + private class SerialReaderThread extends HadoopThread { public SerialReaderThread(String threadName) { super(threadName); @@ -78,7 +79,7 @@ public SerialReaderThread(String threadName) { * == */ @Override - public void run() { + public void work() { try { startFlag.await(); if 
(Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java index 50261139f9402..6984774317331 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java @@ -20,10 +20,10 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.Random; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; @@ -102,9 +102,9 @@ protected boolean canEmulateCompression() { @Override public Job call() throws IOException, InterruptedException, ClassNotFoundException { - ugi.doAs( - new PrivilegedExceptionAction() { - public Job run() + ugi.callAs( + new Callable() { + public Job call() throws IOException, ClassNotFoundException, InterruptedException { job.setMapperClass(SleepMapper.class); job.setReducerClass(SleepReducer.class); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java index bf73f2a1faa55..fb8ed6a7649d2 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java @@ -28,12 +28,13 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -87,8 +88,8 @@ public Statistics( final Configuration conf, int pollingInterval, CountDownLatch startFlag) throws IOException, InterruptedException { UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - this.cluster = ugi.doAs(new PrivilegedExceptionAction() { - public JobClient run() throws IOException { + this.cluster = ugi.callAs(new Callable() { + public JobClient call() throws IOException { return new JobClient(new JobConf(conf)); } }); @@ -216,13 +217,13 @@ public void start() { statistics.start(); } - private class StatCollector extends Thread { + private class StatCollector extends HadoopThread { StatCollector() { super("StatsCollectorThread"); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java index 4e7fc9c2bbd80..bf5ea483e25c0 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java @@ -30,6 +30,7 @@ import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.util.HashSet; @@ -136,7 +137,7 @@ public Thread createReaderThread() { * Worker thread responsible for reading descriptions, assigning sequence * numbers, and normalizing time. */ - private class StressReaderThread extends Thread { + private class StressReaderThread extends HadoopThread { public StressReaderThread(String name) { super(name); @@ -152,7 +153,7 @@ public StressReaderThread(String name) { * load the JT. * That is submit (Sigma(no of maps/Job)) > (2 * no of slots available) */ - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java index 23e1413fcad87..850e8ed9af0db 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java @@ -20,13 +20,14 @@ package org.apache.hadoop.resourceestimator.service; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Simple shutdown hook for {@link ResourceEstimatorServer}. */ -public class ShutdownHook extends Thread { +public class ShutdownHook extends HadoopThread { private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownHook.class); private final ResourceEstimatorServer server; @@ -35,7 +36,7 @@ public class ShutdownHook extends Thread { this.server = server; } - public void run() { + public void work() { try { server.shutdown(); } catch (Exception e) { diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java index 3d6541565cb44..750b5dd371e97 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java @@ -38,7 +38,7 @@ import org.apache.hadoop.streaming.io.TextOutputReader; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.ReflectionUtils; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.io.Text; /** Shared functionality for PipeMapper, PipeReducer. 
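Note that the run()-to-work() rename leaves interruption semantics untouched: the patched loops keep checking isInterrupted() and catching InterruptedException exactly as before, just from inside work(). A self-contained illustration of that loop shape (RetentionSweeper is a hypothetical class, not part of this patch):

import org.apache.hadoop.util.concurrent.HadoopThread;

class RetentionSweeper extends HadoopThread {
  private final long intervalMs;

  RetentionSweeper(long intervalMs) {
    super("RetentionSweeper");
    this.intervalMs = intervalMs;
  }

  @Override
  public void work() {
    // Same polling convention as the MonitorThread and StatCollector
    // bodies in this patch.
    while (!isInterrupted()) {
      sweep();
      try {
        Thread.sleep(intervalMs);
      } catch (InterruptedException e) {
        return; // exit promptly once interrupted
      }
    }
  }

  private void sweep() {
    // placeholder for the periodic task
  }
}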
@@ -366,7 +366,7 @@ OutputReader createOutputReader(Class outputReaderClass) } - class MROutputThread extends Thread { + class MROutputThread extends HadoopThread { MROutputThread(OutputReader outReader, OutputCollector outCollector, Reporter reporter) { @@ -376,7 +376,7 @@ class MROutputThread extends Thread { this.reporter = reporter; } - public void run() { + public void work() { try { // 3/4 Tool to Hadoop while (outReader.readKeyValue()) { @@ -418,7 +418,7 @@ public void run() { } - class MRErrorThread extends Thread { + class MRErrorThread extends HadoopThread { public MRErrorThread() { this.reporterPrefix = job_.get("stream.stderr.reporter.prefix", "reporter:"); @@ -431,7 +431,7 @@ public void setReporter(Reporter reporter) { this.reporter = reporter; } - public void run() { + public void work() { Text line = new Text(); LineReader lineReader = null; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index d6ec1e4d4c516..d745d187c3b96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -69,6 +69,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -1761,7 +1762,7 @@ Thread createLaunchContainerThread(Container allocatedContainer, LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer, containerListener, shellId); - return new Thread(runnableLaunchContainer); + return new HadoopThread(runnableLaunchContainer); } private void publishContainerStartEventOnTimelineServiceV2( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java index 607a4c90d7e93..7fc42f73542f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -333,7 +334,7 @@ protected void baseTestDSShell(String methodName, boolean haveDomain, boolean de assertTrue(initSuccess); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { result.set(dsClient.run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java index 6ef26ed1cce72..44018ad852e2b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -126,7 +127,7 @@ public void testDSShellWithEnforceExecutionType(TestInfo testInfo) throws Except try { setAndGetDSClient(new Configuration(getYarnClusterConfiguration())); getDSClient().init(args); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { getDSClient().run(); } catch (Exception e) { @@ -220,7 +221,7 @@ private void doTestDistributedShellWithResources( assertTrue(getDSClient().init(args)); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { result.set(getDSClient().run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java index 7ebc747ed2ea1..0253126274501 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.YarnClient; @@ -320,7 +321,7 @@ public void testDistributedShellWithAllocationTagNamespace( new Client( new Configuration(distShellTest.getYarnClusterConfiguration())); dsClient.init(argsA); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { dsClient.run(); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index 37b3477c8c187..30e5162c20a37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -242,9 +243,9 @@ public void launchAM(ApplicationAttemptId attemptId) // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new HadoopThread() { @Override - public void run() { + public void work() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { @@ -256,9 +257,9 @@ public void run() { } } }; - Thread outThread = new Thread() { + Thread outThread = new HadoopThread() { @Override - public void run() { + public void work() { try { String line = inReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java index f971d7140aa44..613eb8a089424 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import 
org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.service.SystemServiceManager; @@ -127,7 +128,7 @@ protected void serviceStart() throws Exception { launchUserService(syncUserServices); // Create a thread and submit services in background otherwise it // block RM switch time. - serviceLaucher = new Thread(createRunnable()); + serviceLaucher = new HadoopThread(createRunnable()); serviceLaucher.setName("System service launcher"); serviceLaucher.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index 2a9bf8d5d975f..ebb766903c891 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -154,9 +155,9 @@ public StopResponseProto stop(StopRequestProto requestProto) // Stop the service in 2 seconds delay to make sure this rpc call is completed. // shutdown hook will be executed which will stop AM gracefully. 
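Two construction styles coexist in this patch, and both appear in the hunks around this point: wrapping an existing Runnable (SystemServiceManagerImpl, AsyncDispatcher) and subclassing, often anonymously, with an overridden work() (ClientAMService, UnmanagedAMLauncher). A compact, runnable illustration of both styles (class name and printed strings are placeholders):

import org.apache.hadoop.util.concurrent.HadoopThread;

class HadoopThreadStyles {
  public static void main(String[] args) {
    // Style 1: wrap a Runnable; behavior matches a plain Thread.
    Thread launcher = new HadoopThread(() -> System.out.println("launching"));
    launcher.setName("System service launcher");
    launcher.start();

    // Style 2: anonymous subclass overriding work(), as in the
    // delayed-stop thread ClientAMService creates below.
    Thread delayedStop = new HadoopThread() {
      @Override
      public void work() {
        try {
          Thread.sleep(2000); // let the in-flight RPC finish first
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        System.out.println("stopping");
      }
    };
    delayedStop.start();
  }
}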
- Thread thread = new Thread() { + Thread thread = new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(2000); ExitUtil.terminate(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java index 5656484fca126..89c01f9bf8d53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.eclipse.jetty.websocket.api.Session; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect; @@ -85,7 +86,7 @@ public void onClose(Session session, int status, String reason) { public void run() { try { Reader consoleReader = new Reader(); - Thread inputThread = new Thread(consoleReader, "consoleReader"); + Thread inputThread = new HadoopThread(consoleReader, "consoleReader"); inputThread.start(); while (mySession.isOpen()) { mySession.getRemote().flush(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 376c9dc1b05c1..2fda78d5de474 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.Container; @@ -293,12 +294,12 @@ public void updateTrackingUrl(String trackingUrl) { client.updateTrackingUrl(trackingUrl); } - private class HeartbeatThread extends Thread { + private class HeartbeatThread extends HadoopThread { public HeartbeatThread() { super("AMRM Heartbeater thread"); } - public void run() { + public void work() { while (true) { Object response = null; // synchronization ensures we don't send heartbeats after unregistering @@ -337,12 +338,12 @@ public void run() { } } - private class CallbackHandlerThread extends Thread { + private class CallbackHandlerThread extends HadoopThread { public CallbackHandlerThread() { super("AMRM Callback Handler Thread"); } - public void run() { + public void work() { while (true) { if (!keepRunning) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java index 4a4c50607dab7..e0e737017d777 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java @@ -59,6 +59,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,7 +74,7 @@ public class NMClientAsyncImpl extends NMClientAsync { protected ThreadPoolExecutor threadPool; protected int maxThreadPoolSize; - protected Thread eventDispatcherThread; + protected HadoopThread eventDispatcherThread; protected AtomicBoolean stopped = new AtomicBoolean(false); protected BlockingQueue events = new LinkedBlockingQueue(); @@ -151,9 +152,9 @@ protected void serviceStart() throws Exception { threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventDispatcherThread = new Thread() { + eventDispatcherThread = new HadoopThread() { @Override - public void run() { + public void work() { ContainerEvent event = null; Set allNodes = new HashSet(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index c16fe03b82a43..a3a536ad46391 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -63,6 +63,7 @@ import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Priority; @@ -367,9 +368,9 @@ private static class QueueMetrics { long pendingContainers; } - private class KeyboardMonitor extends Thread { + private class KeyboardMonitor extends HadoopThread { - public void run() { + public void work() { Scanner keyboard = new Scanner(System.in, "UTF-8"); while (runKeyboardMonitor.get()) { String in = keyboard.next(); @@ -1229,7 +1230,7 @@ private String getCommandOutput(String[] command) throws IOException, private void addShutdownHook() { //clear screen when the program exits - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { clearScreen(); })); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java index 165569df4c736..0a1a75ab66ee2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; @@ -160,7 +161,7 @@ private void testProxyProvider(boolean facadeFlushCache) throws Exception { .getSubClusters(any(GetSubClustersInfoRequest.class)); threadResponse = null; - Thread thread = new Thread(new Runnable() { + Thread thread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java index 963d01b4c90ff..4b64e68908a05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java @@ -41,6 +41,7 @@ import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -402,7 +403,7 @@ public void testUncaughtExceptionHandlerWithHAEnabled() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new HadoopThread(new Runnable() { @Override public void run() { throw rte; @@ -446,7 +447,7 @@ public void testUncaughtExceptionHandlerWithoutHA() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new HadoopThread(new Runnable() { @Override public void run() { throw rte; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index 2da782ea6db1c..7518ac794c27d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -34,11 +34,11 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.Callable; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; import java.util.HashMap; @@ -1923,9 +1923,9 @@ public void testAMRMClientOnAMRMTokenRollOver(String pSchedulerName, AllocateRequest 
request = Records.newRecord(AllocateRequest.class); request.setResponseId(response.getResponseId()); - testUser1.doAs(new PrivilegedAction() { + testUser1.callAsNoException(new Callable() { @Override - public ApplicationMasterProtocol run() { + public ApplicationMasterProtocol call() { return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy( ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService() @@ -1956,9 +1956,9 @@ public ApplicationMasterProtocol run() { SecurityUtil.setTokenService(amrmToken_2, yarnCluster .getResourceManager().getApplicationMasterService().getBindAddress()); testUser2.addToken(amrmToken_2); - testUser2.doAs(new PrivilegedAction() { + testUser2.callAsNoException(new Callable() { @Override - public ApplicationMasterProtocol run() { + public ApplicationMasterProtocol call() { return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy( ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java index 02edda63f6c6b..3355a046eed88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java @@ -19,12 +19,12 @@ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -521,9 +521,9 @@ public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception { SecurityUtil.setTokenService(token, rm2.getApplicationMasterService() .getBindAddress()); testUser.addToken(token); - testUser.doAs(new PrivilegedAction() { + testUser.callAsNoException(new Callable() { @Override - public ApplicationMasterProtocol run() { + public ApplicationMasterProtocol call() { return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy( ApplicationMasterProtocol.class, rm2.getApplicationMasterService().getBindAddress(), conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 1cfedf5493e73..2662d3f86735b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -76,6 +76,10 @@ org.eclipse.jetty jetty-util + + org.eclipse.jetty + jetty-servlet + org.apache.hadoop.thirdparty hadoop-shaded-guava @@ -121,6 +125,14 @@ protobuf-java ${transient.protobuf2.scope} + + + com.sun.xml.bind + jaxb-impl + + 2.3.9 + test + org.bouncycastle bcprov-jdk18on diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java index e2978b7042ffb..6f7d7740d5c2c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; +import java.util.concurrent.Callable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,10 +47,10 @@ public static T createAHSProxy(final Configuration conf, protected static T getProxy(final Configuration conf, final Class protocol, final InetSocketAddress rmAddress) throws IOException { - return UserGroupInformation.getCurrentUser().doAs( - new PrivilegedAction() { + return UserGroupInformation.getCurrentUser().callAsNoException( + new Callable() { @Override - public T run() { + public T call() { return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java index 4b4d61ed6c201..7e8705776771b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java @@ -26,9 +26,9 @@ import java.net.SocketException; import java.net.SocketTimeoutException; import java.net.UnknownHostException; -import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; @@ -209,10 +209,10 @@ private static T newProxyInstance(final YarnConfiguration conf, public T getProxy(final Configuration conf, final Class protocol, final InetSocketAddress rmAddress) throws IOException { - return user.doAs( - new PrivilegedAction() { + return user.callAsNoException( + new Callable() { @Override - public T run() { + public T call() { return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java index 878362e83aef2..727d9b58d3ce6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java @@ -24,9 +24,9 @@ import java.net.NoRouteToHostException; import java.net.SocketException; import java.net.UnknownHostException; -import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -96,9 +96,9 @@ protected static T createRetriableProxy(final Configuration conf, final Class protocol, final UserGroupInformation user, final YarnRPC rpc, final InetSocketAddress serverAddress, RetryPolicy retryPolicy) { - T proxy = user.doAs(new PrivilegedAction() { + T proxy = user.callAsNoException(new Callable() { @Override - public T run() { + public T call() { return (T) rpc.getProxy(protocol, serverAddress, conf); } }); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index a3436f7bbe0ba..d71b06a3f94af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -218,7 +219,7 @@ protected void serviceInit(Configuration conf) throws Exception{ protected void serviceStart() throws Exception { //start all the components super.serviceStart(); - eventHandlingThread = new Thread(createThread()); + eventHandlingThread = new HadoopThread(createThread()); eventHandlingThread.setName(dispatcherThreadName); eventHandlingThread.start(); } @@ -284,7 +285,7 @@ protected void dispatch(Event event) { && (ShutdownHookManager.get().isShutdownInProgress()) == false && stopped == false) { stopped = true; - Thread shutDownThread = new Thread(createShutDownThread()); + Thread shutDownThread = new HadoopThread(createShutDownThread()); shutDownThread.setName("AsyncDispatcher ShutDown handler"); shutDownThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 647ab6e9481f5..e4a7024c8e47b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -28,6 +28,7 @@ import org.slf4j.MarkerFactory; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import java.util.concurrent.BlockingQueue; @@ -105,7 +106,7 @@ public void run() { public EventDispatcher(EventHandler handler, String name) { super(name); this.handler = handler; - this.eventProcessor = new Thread(new EventProcessor()); + this.eventProcessor = new HadoopThread(new EventProcessor()); this.eventProcessor.setName(getName() + ":Event Processor"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java index 0ae6c47d0ecd7..809aa8e34ff63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.service.AbstractService; +import 
org.apache.hadoop.util.concurrent.HadoopThread; /** * A simple liveliness monitor with which clients can register, trust the @@ -66,7 +67,7 @@ public AbstractLivelinessMonitor(String name) { protected void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; resetTimer(); - checkerThread = new Thread(new PingChecker()); + checkerThread = new HadoopThread(new PingChecker()); checkerThread.setName("Ping Checker for "+getName()); checkerThread.start(); super.serviceStart(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java index e0201cfcd1557..aaf13ca12761e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import static org.junit.jupiter.api.Assertions.assertSame; @@ -44,7 +45,7 @@ void testUncaughtExceptionHandlerWithRuntimeException() final YarnUncaughtExceptionHandler spyYarnHandler = spy(exHandler); final YarnRuntimeException yarnException = new YarnRuntimeException( "test-yarn-runtime-exception"); - final Thread yarnThread = new Thread(new Runnable() { + final Thread yarnThread = new HadoopThread(new Runnable() { @Override public void run() { throw yarnException; @@ -74,7 +75,7 @@ void testUncaughtExceptionHandlerWithError() ExitUtil.disableSystemExit(); final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler); final java.lang.Error error = new java.lang.Error("test-error"); - final Thread errorThread = new Thread(new Runnable() { + final Thread errorThread = new HadoopThread(new Runnable() { @Override public void run() { throw error; @@ -103,7 +104,7 @@ void testUncaughtExceptionHandlerWithOutOfMemoryError() ExitUtil.disableSystemHalt(); final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler); final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error"); - final Thread oomThread = new Thread(new Runnable() { + final Thread oomThread = new HadoopThread(new Runnable() { @Override public void run() { throw oomError; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index 78741720a171d..0e1ca1be583a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.commons.collections4.map.LRUMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -283,7 +284,7 @@ public StartAndInsertTime(long startTime, long insertTime) { } } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread extends HadoopThread { private final long ttl; private final long ttlInterval; @@ -298,7 +299,7 @@ public EntityDeletionThread(Configuration conf) { } @Override - public void run() { + public void work() { while (true) { long timestamp = System.currentTimeMillis() - ttl; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index 97ff86ede271b..cd7b9cb77e75b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.util.ArrayList; @@ -389,7 +390,7 @@ protected void serviceStop() throws Exception { super.serviceStop(); } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread extends HadoopThread { private final long ttl; private final long ttlInterval; @@ -404,7 +405,7 @@ private class EntityDeletionThread extends Thread { } @Override - public void run() { + public void work() { Thread.currentThread().setName("Leveldb Timeline Store Retention"); while (true) { long timestamp = System.currentTimeMillis() - ttl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java index 82e018a5404e8..98594a73745a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java @@ -189,7 +189,7 @@ public static void tearDown() throws Exception { @ParameterizedTest void testPutTimelineEntities(boolean isSslEnabled) throws Exception { initTestTimelineAuthenticationFilterForV1(isSslEnabled); - KerberosTestUtils.doAs(PRINCIPAL, new Callable() { + KerberosTestUtils.callAs(PRINCIPAL, new Callable() { @Override public Void call() throws Exception { TimelineClient client = createTimelineClientForUGI(); @@ -218,7 +218,7 @@ public Void call() throws Exception { @ParameterizedTest 
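Note: both timeline stores convert their retention worker the same way: the inner class now extends HadoopThread and overrides work() instead of run(). A minimal sketch of that shape, assuming (as the rest of this patch suggests) that HadoopThread is a Thread subclass whose run() performs common wrapping and then invokes work(); the names, fields, and helper below are illustrative:

    class EntityRetentionWorker extends HadoopThread { // hypothetical name
      private final long ttl;
      private final long ttlInterval;

      EntityRetentionWorker(long ttl, long ttlInterval) {
        this.ttl = ttl;
        this.ttlInterval = ttlInterval;
      }

      @Override
      public void work() { // was run() before this patch
        while (!Thread.currentThread().isInterrupted()) {
          discardEntitiesOlderThan(System.currentTimeMillis() - ttl);
          try {
            Thread.sleep(ttlInterval);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt and exit
            return;
          }
        }
      }

      private void discardEntitiesOlderThan(long cutoff) { /* elided */ }
    }
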
void testPutDomains(boolean isSslEnabled) throws Exception { initTestTimelineAuthenticationFilterForV1(isSslEnabled); - KerberosTestUtils.doAs(PRINCIPAL, new Callable() { + KerberosTestUtils.callAs(PRINCIPAL, new Callable() { @Override public Void call() throws Exception { TimelineClient client = createTimelineClientForUGI(); @@ -243,7 +243,7 @@ public Void call() throws Exception { void testDelegationTokenOperations(boolean isSslEnabled) throws Exception { initTestTimelineAuthenticationFilterForV1(isSslEnabled); TimelineClient httpUserClient = - KerberosTestUtils.doAs(PRINCIPAL, + KerberosTestUtils.callAs(PRINCIPAL, new Callable() { @Override public TimelineClient call() throws Exception { @@ -251,7 +251,7 @@ public TimelineClient call() throws Exception { } }); UserGroupInformation httpUser = - KerberosTestUtils.doAs(PRINCIPAL, + KerberosTestUtils.callAs(PRINCIPAL, new Callable() { @Override public UserGroupInformation call() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java index cb59d41505deb..f2110da05eccf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java @@ -34,13 +34,14 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Extends Thread and provides an implementation that is used for processing the * AM heart beat request asynchronously and sending back the response using the * callback method registered with the system. 
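Note: KerberosTestUtils already took a java.util.concurrent.Callable, so doAs to callAs in these tests is a pure rename at each call site. Assuming callAs keeps the old (String principal, Callable<T>) signature, a converted call looks like:

    TimelineClient httpUserClient = KerberosTestUtils.callAs(PRINCIPAL,
        new Callable<TimelineClient>() {
          @Override
          public TimelineClient call() throws Exception {
            return createTimelineClientForUGI(); // helper from the test class
          }
        });
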
*/ -public class AMHeartbeatRequestHandler extends Thread { +public class AMHeartbeatRequestHandler extends HadoopThread { public static final Logger LOG = LoggerFactory.getLogger(AMHeartbeatRequestHandler.class); @@ -83,7 +84,7 @@ public void shutdown() { } @Override - public void run() { + public void work() { while (keepRunning) { AsyncAllocateRequestInfo requestInfo; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java index 52e7249f9b3b8..ab1127603e25a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.federation.utils; import java.io.IOException; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.collections4.MapUtils; @@ -250,9 +250,9 @@ private String readRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final boolean throwIfFails) throws YarnException { // Use the ugi loaded with app credentials to access registry - String result = ugi.doAs(new PrivilegedAction() { + String result = ugi.callAsNoException(new Callable() { @Override - public String run() { + public String call() { try { ServiceRecord value = registryImpl.resolve(key); if (value != null) { @@ -276,9 +276,9 @@ private void removeKeyRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final boolean recursive, final boolean throwIfFails) throws YarnException { // Use the ugi loaded with app credentials to access registry - boolean success = ugi.doAs(new PrivilegedAction() { + boolean success = ugi.callAsNoException(new Callable() { @Override - public Boolean run() { + public Boolean call() { try { registryImpl.delete(key, recursive); return true; @@ -305,9 +305,9 @@ private void writeRegistry(final RegistryOperations registryImpl, final ServiceRecord recordValue = new ServiceRecord(); recordValue.description = value; // Use the ugi loaded with app credentials to access registry - boolean success = ugi.doAs(new PrivilegedAction() { + boolean success = ugi.callAsNoException(new Callable() { @Override - public Boolean run() { + public Boolean call() { try { registryImpl.bind(key, recordValue, BindFlags.OVERWRITE); return true; @@ -330,7 +330,7 @@ public Boolean run() { private List listDirRegistry(final RegistryOperations registryImpl, UserGroupInformation ugi, final String key, final boolean throwIfFails) throws YarnException { - List result = ugi.doAs((PrivilegedAction>) () -> { + List result = ugi.callAsNoException((Callable>) () -> { try { return registryImpl.list(key); } catch (Throwable e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java 
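Note: listDirRegistry in FederationRegistryClient above shows the lambda form of the same migration: only the cast's target type changes. A sketch with the generic parameters written out (the rendering above drops them), assuming registryImpl, key and ugi as in the surrounding methods:

    List<String> children = ugi.callAsNoException(
        (Callable<List<String>>) () -> {
          try {
            return registryImpl.list(key);
          } catch (Throwable e) {
            // The real method consults throwIfFails here; this sketch
            // simply falls back to an empty listing.
            return new ArrayList<String>();
          }
        });
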
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java index 0ff4260c5e358..f1a385d2b1452 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -37,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -105,7 +106,7 @@ protected void serviceStart() throws Exception { protected void serviceStop() throws Exception { if (!this.unmanagedAppMasterMap.isEmpty()) { - finishApplicationThread = new Thread(createForceFinishApplicationThread()); + finishApplicationThread = new HadoopThread(createForceFinishApplicationThread()); finishApplicationThread.setName(dispatcherThreadName); finishApplicationThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java index 8d6fc50c6c982..9afc9c1e12201 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.federation.policies; +import static org.junit.Assume.assumeNoException; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.mockito.Mockito.mock; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java index bb6944e1034e0..9e0a899d718f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java @@ -33,6 +33,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -228,7 +229,7 
@@ public void testSlowRegisterCall() throws YarnException, IOException, InterruptedException { // Register with wait() in RM in a separate thread - Thread registerAMThread = new Thread(new Runnable() { + Thread registerAMThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -486,10 +487,10 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(attemptId).doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 76d3439575c55..ff941fcfe2112 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT; @@ -851,7 +852,7 @@ public String getProcessId(ContainerId containerID) { * This class will signal a target container after a specified delay. 
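Note: the test override above is the one subtle spot in the run() to work() rename: a subclass that used to re-enter its parent's loop via super.run() inside a doAs must now call super.work(), or it would bypass HadoopThread's wrapper entirely. A sketch, with the PrivilegedExceptionAction type parameter restored and the catch body assumed:

    @Override
    public void work() {
      try {
        getUGIWithToken(attemptId).doAs(
            (PrivilegedExceptionAction<Object>) () -> {
              // Re-enter the parent's request-handling loop under the token UGI.
              TestableAMRequestHandlerThread.super.work();
              return null;
            });
      } catch (Exception e) {
        LOG.error("AM request handler failed", e); // assumed handling
      }
    }
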
* @see #signalContainer */ - public static class DelayedProcessKiller extends Thread { + public static class DelayedProcessKiller extends HadoopThread { private final Container container; private final String user; private final String pid; @@ -883,7 +884,7 @@ public DelayedProcessKiller(Container container, String user, String pid, } @Override - public void run() { + public void work() { try { Thread.sleep(delay); containerExecutor.signalContainer(new ContainerSignalContext.Builder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 6110e624f8d37..4019efc3c7ca0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -534,9 +535,9 @@ public String getName() { } protected void shutDown(final int exitCode) { - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { NodeManager.this.stop(); } catch (Throwable t) { @@ -559,9 +560,9 @@ protected void resyncWithRM() { // Some other thread is already created for resyncing, do nothing } else { // We have got the lock, create a new thread - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { if (!rmWorkPreservingRestartEnabled) { LOG.info("Cleaning up running containers on resync"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index 37fa33e14fcce..b83fe4944a36f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; @@ -149,7 +150,7 @@ protected void serviceStop() throws Exception { /** * Thread that monitors the resource utilization of this node. */ - private class MonitoringThread extends Thread { + private class MonitoringThread extends HadoopThread { /** * Initialize the node resource monitoring thread.
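Note: Runnable-based call sites, such as the status updater threads in the NodeStatusUpdaterImpl hunks below, need no body changes at all, provided HadoopThread mirrors Thread's (Runnable) and (Runnable, String) constructors, which this patch appears to assume:

    Runnable statusUpdaterRunnable = () -> { /* heartbeat loop elided */ };
    Thread statusUpdater =
        new HadoopThread(statusUpdaterRunnable, "Node Status Updater");
    statusUpdater.start();
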
*/ @@ -162,7 +163,7 @@ public MonitoringThread() { * Periodically monitor the resource utilization of the node. */ @Override - public void run() { + public void work() { while (true) { // Get node utilization and save it into the health status long pmem = resourceCalculatorPlugin.getPhysicalMemorySize() - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 5da709c49dc2b..a12742e4eae90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -44,6 +44,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -329,7 +330,7 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { try { statusUpdater.join(); registerWithRM(); - statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); + statusUpdater = new HadoopThread(statusUpdaterRunnable, "Node Status Updater"); this.isStopped = false; statusUpdater.start(); LOG.info("NodeStatusUpdater thread is reRegistered and restarted"); @@ -828,7 +829,7 @@ private static Map parseCredentials( protected void startStatusUpdater() { statusUpdaterRunnable = new StatusUpdaterRunnable(); statusUpdater = - new Thread(statusUpdaterRunnable, "Node Status Updater"); + new HadoopThread(statusUpdaterRunnable, "Node Status Updater"); statusUpdater.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index 78ba39ef69380..e5fcf92ae7e52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -52,6 +52,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; @@ -497,10 +498,10 @@ public void validateResult() throws IOException { private Thread startStreamReader(final InputStream stream) throws IOException { - Thread streamReaderThread = new Thread() { + Thread streamReaderThread = 
new HadoopThread() { @Override - public void run() { + public void work() { try (BufferedReader lines = new BufferedReader( new InputStreamReader(stream, StandardCharsets.UTF_8))) { char[] buf = new char[512]; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index cdd9fc916e339..6c9a0e69583cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -1749,9 +1750,9 @@ private void doRelaunch(final ContainerImpl container, container.sendRelaunchEvent(); } else { // wait for some time, then send launch event - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(retryInterval); container.sendRelaunchEvent(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index e942983e01168..f381b2a514a09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -55,7 +56,7 @@ * events of all the containers together, and if we go over the limit picks * a container to kill. The algorithm that picks the container is a plugin. */ -public class CGroupElasticMemoryController extends Thread { +public class CGroupElasticMemoryController extends HadoopThread { protected static final Logger LOG = LoggerFactory .getLogger(CGroupElasticMemoryController.class); private final Clock clock = new MonotonicClock(); @@ -238,7 +239,7 @@ public static boolean isAvailable() { * reasons. 
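Note: startStreamReader above uses the anonymous-subclass form; as elsewhere, only the overridden method's name changes. A sketch that fills in the drain loop the hunk truncates (the loop body and error handling are assumptions):

    Thread streamReaderThread = new HadoopThread() {
      @Override
      public void work() { // was run() on the anonymous Thread
        try (BufferedReader lines = new BufferedReader(
            new InputStreamReader(stream, StandardCharsets.UTF_8))) {
          char[] buf = new char[512];
          int n;
          while ((n = lines.read(buf, 0, buf.length)) != -1) {
            // The real implementation forwards the output; elided here.
          }
        } catch (IOException e) {
          // Assumed handling; the original body is truncated in the hunk.
        }
      }
    };
    streamReaderThread.start();
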
*/ @Override - public void run() { + public void work() { ExecutorService executor = null; try { // Disable OOM killer and set a limit. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 0477e7320fee2..9bbc3c23c5b92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -27,7 +27,6 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -191,9 +190,9 @@ public void runLocalization(final InetSocketAddress nmAddr) UserGroupInformation.createRemoteUser(user); remoteUser.addToken(creds.getToken(LocalizerTokenIdentifier.KIND)); final LocalizationProtocol nodeManager = - remoteUser.doAs(new PrivilegedAction() { + remoteUser.callAsNoException(new Callable() { @Override - public LocalizationProtocol run() { + public LocalizationProtocol call() { return getProxy(nmAddr); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index a7f0722e66f8e..74cbc90124876 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -80,6 +80,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -861,7 +862,7 @@ private static ExecutorService createLocalizerExecutor(Configuration conf) { } - class PublicLocalizer extends Thread { + class PublicLocalizer extends HadoopThread { final FileContext lfs; final Configuration conf; @@ -975,7 +976,7 @@ private void createDir(Path dirPath, FsPermission perms) } @Override - public void run() { + public void work() { try { // TODO shutdown, better error handling esp. DU while (!Thread.currentThread().isInterrupted()) { @@ -1030,7 +1031,7 @@ public void run() { * access to user's credentials. One {@link LocalizerRunner} per localizerId. 
* */ - class LocalizerRunner extends Thread { + class LocalizerRunner extends HadoopThread { final LocalizerContext context; final String localizerId; @@ -1254,7 +1255,7 @@ private Path getPathForLocalization(LocalResource rsrc, @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { Path nmPrivateCTokensPath = null; Throwable exception = null; try { @@ -1405,7 +1406,7 @@ static String buildTokenFingerprint(Token tk) return fingerprint.toString(); } - static class CacheCleanup extends Thread { + static class CacheCleanup extends HadoopThread { private final Dispatcher dispatcher; @@ -1416,7 +1417,7 @@ public CacheCleanup(Dispatcher dispatcher) { @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { dispatcher.getEventHandler().handle( new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 0b4bd4a3fbd81..608801d5e3cc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; @@ -489,13 +490,13 @@ boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree, curMemUsageOfAgedProcesses, limit); } - private class MonitoringThread extends Thread { + private class MonitoringThread extends HadoopThread { MonitoringThread() { super("Container Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { long start = Time.monotonicNow(); @@ -884,13 +885,13 @@ private String formatUsageString(long currentVmemUsage, long vmemLimit, } } - private class LogMonitorThread extends Thread { + private class LogMonitorThread extends HadoopThread { LogMonitorThread() { super("Container Log Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { for (Entry entry : trackingContainers.entrySet()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index f34983eabfff4..55359756285dc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -25,13 +25,13 @@ import java.io.PrintWriter; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; @@ -232,9 +232,9 @@ public static void startContainer(NodeManager nm, ContainerId cId, currentUser.addToken(nmToken); ContainerManagementProtocol containerManager = - currentUser.doAs(new PrivilegedAction() { + currentUser.callAsNoException(new Callable() { @Override - public ContainerManagementProtocol run() { + public ContainerManagementProtocol call() { Configuration conf = new Configuration(); YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress containerManagerBindAddress = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 89010bb3342e9..dbcf61ec7e809 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -69,6 +69,7 @@ import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1173,7 +1174,7 @@ protected NodeStatusUpdater createNodeStatusUpdater(Context context, assertTrue(lastService instanceof NodeStatusUpdater, "last service is NOT the node status updater"); - Thread starterThread = new Thread(() -> { + Thread starterThread = new HadoopThread(() -> { try { nm.start(); } catch (Throwable e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java index 5172e12b64e7b..984edf296bc8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java @@ -288,11 
+288,11 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(getAttemptId()) .doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b35f8cb295cae..4fda747e9c824 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -57,6 +57,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1135,7 +1136,7 @@ private class SchedulerEventDispatcher extends SchedulerEventDispatcher(String name, int samplesPerMin) { super(scheduler, name); this.eventProcessorMonitor = - new Thread(new EventProcessorMonitor(getEventProcessorId(), + new HadoopThread(new EventProcessorMonitor(getEventProcessorId(), samplesPerMin)); this.eventProcessorMonitor .setName("ResourceManager Event Processor Monitor"); @@ -1220,7 +1221,7 @@ protected void serviceStop() throws Exception { */ private void handleTransitionToStandByInNewThread() { Thread standByTransitionThread = - new Thread(activeServices.standByTransitionRunnable); + new HadoopThread(activeServices.standByTransitionRunnable); standByTransitionThread.setName("StandByTransitionThread"); standByTransitionThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 9f4de2868a1fd..928dc74ff21ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -105,14 +106,14 @@ protected void serviceStop() throws Exception { launcherPool.shutdown(); } - private class LauncherThread extends Thread { + private class LauncherThread 
extends HadoopThread { public LauncherThread() { super("ApplicationMaster Launcher"); } @Override - public void run() { + public void work() { while (!this.isInterrupted()) { Runnable toLaunch; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java index 6384736d62e11..7f6424d3e50b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -597,7 +598,7 @@ public int incrementCurrentKeyId() { */ public void createCleanUpFinishApplicationThread(String stage) { String threadName = cleanUpThreadNamePrefix + "-" + stage; - Thread finishApplicationThread = new Thread(createCleanUpFinishApplicationThread()); + Thread finishApplicationThread = new HadoopThread(createCleanUpFinishApplicationThread()); finishApplicationThread.setName(threadName); finishApplicationThread.start(); LOG.info("CleanUpFinishApplicationThread has been started {}.", threadName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java index f1b80a946a7d0..09e8e4c872126 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java @@ -29,6 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -536,13 +537,13 @@ public void handle(TimelineV1PublishEvent event) { } } - private class PutEventThread extends Thread { + private class PutEventThread extends HadoopThread { PutEventThread() { super("PutEventThread"); } @Override - public void run() { + public void work() { LOG.info("System metrics publisher will put events every " + String.valueOf(putEventInterval) + " milliseconds"); while (!stopped && 
!Thread.currentThread().isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index b7d1220bf9f2d..e132b54c1d01d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -27,6 +27,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.classification.VisibleForTesting; @@ -70,7 +71,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName(getName()); return t; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index f0990cf8fb0a6..241d16225982e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -30,6 +30,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.curator.ZKCuratorManager.SafeTransaction; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -1468,13 +1469,13 @@ public void safeDeleteAndCheckNode(String path, List fencingACL, * Helper class that periodically attempts creating a znode to ensure that * this RM continues to be the Active. 
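Note: SchedulingMonitor above (and MultiNodeSorter further down) reach executor-owned threads through the ThreadFactory instead: the factory's product becomes a HadoopThread, so pool threads pick up the same wrapper. The pattern, with the executor and thread name illustrative:

    ScheduledExecutorService ses =
        Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new HadoopThread(r); // was new Thread(r)
            t.setName("SchedulingMonitor"); // placeholder name
            return t;
          }
        });
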
*/ - private class VerifyActiveStatusThread extends Thread { + private class VerifyActiveStatusThread extends HadoopThread { VerifyActiveStatusThread() { super(VerifyActiveStatusThread.class.getName()); } @Override - public void run() { + public void work() { try { while (!isFencedState()) { // Create and delete fencing node diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 6010bd21a186e..0f6e7999a9e55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1238,9 +1239,9 @@ public RMAppAttemptState transition(RMAppAttemptImpl appAttempt, private void retryFetchingAMContainer(final RMAppAttemptImpl appAttempt) { // start a new thread so that we are not blocking main dispatcher thread. - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); } catch (InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 3343c5f93118d..7bfe8f93d77f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -117,6 +117,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.util.concurrent.HadoopThread; @SuppressWarnings("unchecked") @@ -1716,9 +1717,9 @@ public void update() { * Thread which calls {@link #update()} every * updateInterval milliseconds. 
*/ - private class UpdateThread extends Thread { + private class UpdateThread extends HadoopThread { @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { synchronized (updateThreadMonitor) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 001c638801bf5..b9693e153bb65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -21,6 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -295,7 +296,7 @@ private void dynamicallyUpdateAppActivitiesMaxQueueLengthIfNeeded() { @Override protected void serviceStart() throws Exception { - cleanUpThread = new Thread(new Runnable() { + cleanUpThread = new HadoopThread(new Runnable() { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 02ffe83a6df7c..1f26e24abcb02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -53,6 +53,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -638,7 +639,7 @@ public void setAsyncSchedulingConf(AsyncSchedulingConfiguration conf) { this.asyncSchedulingConf = conf; } - static class AsyncScheduleThread extends Thread { + static class AsyncScheduleThread extends HadoopThread { private final CapacityScheduler cs; private AtomicBoolean runSchedules = new AtomicBoolean(false); @@ -650,7 +651,7 @@ public AsyncScheduleThread(CapacityScheduler cs) { } @Override - public void run() { + public void work() { int debuggingLogCounter = 0;
while (!Thread.currentThread().isInterrupted()) { try { @@ -691,7 +692,7 @@ public void suspendSchedule() { } - static class ResourceCommitterService extends Thread { + static class ResourceCommitterService extends HadoopThread { private final CapacityScheduler cs; private BlockingQueue> backlogs = new LinkedBlockingQueue<>(); @@ -702,7 +703,7 @@ public ResourceCommitterService(CapacityScheduler cs) { } @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { ResourceCommitRequest request = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 7fab417d893cb..85e707b2f3ea2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.XMLUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.security.Permission; @@ -118,7 +119,7 @@ public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); if (this.allocFile != null) { this.fs = allocFile.getFileSystem(conf); - reloadThread = new Thread(() -> { + reloadThread = new HadoopThread(() -> { while (running) { try { synchronized (this) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java index 221bb17ae5ba3..8047a96846690 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java @@ -19,6 +19,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; @@ -39,7 +40,7 @@ /** * Thread that handles FairScheduler preemption. */ -class FSPreemptionThread extends Thread { +class FSPreemptionThread extends HadoopThread { private static final Logger LOG = LoggerFactory. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index 221bb17ae5ba3..8047a96846690 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -19,6 +19,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -39,7 +40,7 @@
 /**
  * Thread that handles FairScheduler preemption.
  */
-class FSPreemptionThread extends Thread {
+class FSPreemptionThread extends HadoopThread {
   private static final Logger LOG = LoggerFactory.
       getLogger(FSPreemptionThread.class);
   protected final FSContext context;
@@ -71,7 +72,7 @@ class FSPreemptionThread extends Thread {
   }
 
   @Override
-  public void run() {
+  public void work() {
     while (!Thread.interrupted()) {
       try {
         FSAppAttempt starvedApp = context.getStarvedApps().take();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a3e3ddfafe39f..6a15393d9cbfe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -100,6 +100,7 @@
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture;
 
 import org.slf4j.Logger;
@@ -315,10 +316,10 @@ public QueueManager getQueueManager() {
    * asynchronous to the node heartbeats.
    */
   @Deprecated
-  private class ContinuousSchedulingThread extends Thread {
+  private class ContinuousSchedulingThread extends HadoopThread {
 
     @Override
-    public void run() {
+    public void work() {
       while (!Thread.currentThread().isInterrupted()) {
         try {
           continuousSchedulingAttempt();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java
index 38af12719efa0..d8251a384690f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -99,7 +100,7 @@ public void serviceStart() throws Exception {
     assert !stopped : "starting when already stopped";
     ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
       public Thread newThread(Runnable r) {
-        Thread t = new Thread(r);
+        Thread t = new HadoopThread(r);
         t.setName(getName());
         return t;
       }
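Pool-managed threads get the same treatment through a ThreadFactory, as the MultiNodeSorter hunk above shows. A compact, reusable factory in that style might look like the following; the class name, the naming scheme, and the counter are all invented for illustration.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.util.concurrent.HadoopThread;

public final class HadoopThreadFactories {

  /** Factory that routes every executor thread through HadoopThread. */
  public static ThreadFactory named(String prefix) {
    AtomicInteger count = new AtomicInteger();
    return r -> {
      Thread t = new HadoopThread(r);
      t.setName(prefix + "-" + count.incrementAndGet()); // e.g. "MultiNodeSorter-1"
      return t;
    };
  }

  public static ScheduledExecutorService singleThreaded(String prefix) {
    return Executors.newSingleThreadScheduledExecutor(named(prefix));
  }

  private HadoopThreadFactories() {
  }
}
```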
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 8e1214afc8095..271ae991746de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AbstractEvent;
@@ -200,7 +201,7 @@ protected void serviceStart() throws Exception {
     dtCancelThread.start();
     if (tokenKeepAliveEnabled) {
       delayedRemovalThread =
-          new Thread(new DelayedTokenRemovalRunnable(getConfig()),
+          new HadoopThread(new DelayedTokenRemovalRunnable(getConfig()),
               "DelayedTokenCanceller");
       delayedRemovalThread.start();
     }
@@ -347,7 +348,7 @@ public int hashCode() {
     }
   }
 
-  private static class DelegationTokenCancelThread extends Thread {
+  private static class DelegationTokenCancelThread extends HadoopThread {
     private static class TokenWithConf {
       Token<?> token;
       Configuration conf;
@@ -377,7 +378,7 @@ public void cancelToken(Token<?> token,
       }
     }
 
-    public void run() {
+    public void work() {
       TokenWithConf tokenWithConf = null;
       while (true) {
         try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
index 686754fa819ad..33d1ad70f5549 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
@@ -26,11 +26,11 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.Callable;
 
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -307,9 +307,9 @@ public void testAuthorizedAccess(Configuration pConf) throws Exception {
         credentials.getAllTokens());
     currentUser.addToken(amRMToken);
     ApplicationMasterProtocol client = currentUser
-        .doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+        .callAsNoException(new Callable<ApplicationMasterProtocol>() {
           @Override
-          public ApplicationMasterProtocol run() {
+          public ApplicationMasterProtocol call() {
             return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rm
                 .getApplicationMasterService().getBindAddress(), conf);
           }
@@ -364,9 +364,9 @@ public void testUnauthorizedAccess(Configuration pConf) throws Exception {
 
     // First try contacting NM without tokens
     ApplicationMasterProtocol client = currentUser
-        .doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+        .callAsNoException(new Callable<ApplicationMasterProtocol>() {
          @Override
-          public ApplicationMasterProtocol run() {
+          public ApplicationMasterProtocol call() {
            return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class,
                serviceAddr, conf);
          }
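The UserGroupInformation side of the doAs to callAsNoException migration is not part of the hunks shown here. Assuming UGI gained a Callable-based twin of doAs, as these call sites imply, its no-checked-exception variant could be shaped like the stand-alone sketch below; the real method lives on UserGroupInformation and may differ in detail.

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;

import javax.security.auth.Subject;

import org.apache.hadoop.util.SubjectUtil;

/**
 * Hypothetical shape of the Callable-based UGI helper used above: run an
 * action under a Subject and surface failures unchecked, the way the old
 * doAs(PrivilegedAction) contract did.
 */
public final class CallAsNoExceptionSketch {

  public static <T> T callAsNoException(Subject subject, Callable<T> action) {
    try {
      return SubjectUtil.callAs(subject, action);
    } catch (CompletionException e) {
      // The PrivilegedAction overload never threw checked exceptions, so
      // this variant rethrows whatever it gets as unchecked.
      throw new RuntimeException(e.getCause());
    }
  }

  private CallAsNoExceptionSketch() {
  }
}
```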
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
index 30c30963bd96e..20a47aa544bb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
@@ -32,8 +32,7 @@
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.test.LambdaTestUtils;
@@ -470,9 +469,9 @@ private org.apache.hadoop.yarn.api.records.Token getDelegationToken(
       final ApplicationClientProtocol clientRMService, final String renewerString)
       throws IOException, InterruptedException {
     org.apache.hadoop.yarn.api.records.Token token = loggedInUser
-        .doAs(new PrivilegedExceptionAction<org.apache.hadoop.yarn.api.records.Token>() {
+        .callAsNoException(new Callable<org.apache.hadoop.yarn.api.records.Token>() {
           @Override
-          public org.apache.hadoop.yarn.api.records.Token run()
+          public org.apache.hadoop.yarn.api.records.Token call()
               throws YarnException, IOException {
             GetDelegationTokenRequest request = Records
                 .newRecord(GetDelegationTokenRequest.class);
@@ -488,9 +487,9 @@ private long renewDelegationToken(final UserGroupInformation loggedInUser,
       final ApplicationClientProtocol clientRMService,
       final org.apache.hadoop.yarn.api.records.Token dToken)
       throws IOException, InterruptedException {
-    long nextExpTime = loggedInUser.doAs(new PrivilegedExceptionAction<Long>() {
+    long nextExpTime = loggedInUser.callAsNoException(new Callable<Long>() {
       @Override
-      public Long run() throws YarnException, IOException {
+      public Long call() throws YarnException, IOException {
         RenewDelegationTokenRequest request = Records
             .newRecord(RenewDelegationTokenRequest.class);
         request.setDelegationToken(dToken);
@@ -505,9 +504,9 @@ private void cancelDelegationToken(final UserGroupInformation loggedInUser,
       final ApplicationClientProtocol clientRMService,
       final org.apache.hadoop.yarn.api.records.Token dToken)
       throws IOException, InterruptedException {
-    loggedInUser.doAs(new PrivilegedExceptionAction<Void>() {
+    loggedInUser.callAsNoException(new Callable<Void>() {
       @Override
-      public Void run() throws YarnException, IOException {
+      public Void call() throws YarnException, IOException {
         CancelDelegationTokenRequest request = Records
             .newRecord(CancelDelegationTokenRequest.class);
         request.setDelegationToken(dToken);
@@ -529,9 +528,9 @@ private ApplicationClientProtocol getClientRMProtocolWithDT(
     final YarnRPC rpc = YarnRPC.create(conf);
 
     ApplicationClientProtocol clientRMWithDT = ugi
-        .doAs(new PrivilegedAction<ApplicationClientProtocol>() {
+        .callAsNoException(new Callable<ApplicationClientProtocol>() {
          @Override
-          public ApplicationClientProtocol run() {
+          public ApplicationClientProtocol call() {
            return (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class,
                rmAddress, conf);
          }
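A side effect of this rewrite worth noting: PrivilegedExceptionAction.run() forced callers to funnel checked exceptions through PrivilegedActionException, while Callable.call() declares them directly. A contrived comparison, with an invented UgiLike interface and fetchToken() stand-in, shows the shape the tests above now use.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

// Contrived illustration of the action-type swap made throughout these
// tests. UgiLike and fetchToken() are stand-ins, not Hadoop APIs.
public class CallableMigrationDemo {

  interface UgiLike {
    <T> T callAsNoException(Callable<T> action);
  }

  static String fetchToken() throws IOException {
    return "token"; // stand-in for an RPC call that may fail
  }

  static String getToken(UgiLike ugi) {
    return ugi.callAsNoException(new Callable<String>() {
      @Override
      public String call() throws IOException {
        // call() can declare the checked exception itself; there is no
        // PrivilegedActionException unwrapping at the call site.
        return fetchToken();
      }
    });
  }
}
```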
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index d15a02c778a86..7dc011cd1fcb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -24,6 +24,7 @@
 import java.util.UUID;
 import java.util.function.Supplier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -515,7 +516,7 @@ void stopActiveServices() {
     rm.adminService.transitionToActive(requestInfo);
 
     // 3. Try Transition to standby
-    Thread t = new Thread(new Runnable() {
+    Thread t = new HadoopThread(new Runnable() {
       @Override
       public void run() {
         try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
index 4895987be4c5a..ef4bc6de7d813 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
@@ -236,14 +236,16 @@ private void checkAsyncSchedulerThreads(Thread currentThread){
     Thread resourceCommitterService = null;
     for (Thread thread : threads) {
       StackTraceElement[] stackTrace = thread.getStackTrace();
-      if(stackTrace.length>0){
-        String stackBottom = stackTrace[stackTrace.length-1].toString();
-        if(stackBottom.contains("AsyncScheduleThread.run")){
-          numAsyncScheduleThread++;
-          asyncScheduleThread = thread;
-        }else if(stackBottom.contains("ResourceCommitterService.run")){
-          numResourceCommitterService++;
-          resourceCommitterService = thread;
+      if (stackTrace.length > 0) {
+        for (StackTraceElement elem : stackTrace) {
+          String line = elem.toString();
+          if (line.contains("AsyncScheduleThread.work")) {
+            numAsyncScheduleThread++;
+            asyncScheduleThread = thread;
+          } else if (line.contains("ResourceCommitterService.work")) {
+            numResourceCommitterService++;
+            resourceCommitterService = thread;
+          }
         }
       }
     }
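The TestRMHAForAsyncScheduler change is the one spot where the rename is observable at runtime: with a direct Thread subclass, the deepest stack frame of a running scheduler thread was its own run(), but once HadoopThread.run() delegates to work(), the frame of interest is no longer at the bottom of the trace, so the test must scan every frame. A toy program, relying on the HadoopThread behaviour sketched earlier (so itself an assumption), makes the changed stack shape visible.

```java
import org.apache.hadoop.util.concurrent.HadoopThread;

// Toy demo of the changed stack shape: expect a frame ending in
// ...work(...) above a HadoopThread.run(...) frame, which is why the
// bottom-of-stack check in TestRMHAForAsyncScheduler had to go.
public class StackShapeDemo {
  public static void main(String[] args) throws InterruptedException {
    Thread t = new HadoopThread() {
      @Override
      public void work() {
        for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
          System.out.println(e);
        }
      }
    };
    t.start();
    t.join();
  }
}
```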
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index 5c78b231a1f95..dd0b91693051e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants;
 
+import static org.junit.Assume.assumeNotNull;
+
+import javax.script.ScriptEngineManager;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -57,6 +61,7 @@ public static void checkForJavaScript() {
 
   @BeforeEach
   public void setup() {
+    assumeNotNull(new ScriptEngineManager().getEngineByName("JavaScript"));
     this.metricsSystem = DefaultMetricsSystem.instance();
     JvmMetrics.initSingleton("ResourceManager", null);
     this.ic = new MetricsInvariantChecker();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
index 15319a7e51585..92da1f682080c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -414,7 +415,7 @@ public void testFSRMStateStoreClientRetry() throws Exception {
       final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
       cluster.shutdownNameNodes();
 
-      Thread clientThread = new Thread(() -> {
+      Thread clientThread = new HadoopThread(() -> {
        try {
          store.storeApplicationStateInternal(
              ApplicationId.newInstance(100L, 1),
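The assumeNotNull guard added to TestMetricsInvariantChecker above is a JDK compatibility fix rather than a threading one: since the removal of Nashorn in JDK 15, the JDK ships no built-in JavaScript engine, so the test now skips instead of failing when the engine lookup comes back empty. The same probe works stand-alone:

```java
import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;

// Stand-alone version of the guard: on JDKs without a bundled JavaScript
// engine (Nashorn was removed in JDK 15) the lookup returns null.
public class JsEngineProbe {
  public static void main(String[] args) {
    ScriptEngine js = new ScriptEngineManager().getEngineByName("JavaScript");
    if (js == null) {
      System.out.println("No JavaScript engine available; tests should skip.");
    } else {
      System.out.println("Found engine: " + js.getFactory().getEngineName());
    }
  }
}
```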
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
index 64ac256275527..39dc7d823db15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -817,7 +818,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException {
      * simulate the concurrent calls for QueueMetrics#getQueueMetrics
      */
     // thread A will keep querying the same queue metrics for a specified number of iterations
-    Thread threadA = new Thread(() -> {
+    Thread threadA = new HadoopThread(() -> {
       try {
         for (int i = 0; i < numIterations; i++) {
           QueueMetrics qm = QueueMetrics.getQueueMetrics().get(queueName);
@@ -833,7 +834,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException {
       }
     });
     // thread B will keep adding new queue metrics for a specified number of iterations
-    Thread threadB = new Thread(() -> {
+    Thread threadB = new HadoopThread(() -> {
       try {
         for (int i = 0; i < numIterations; i++) {
           QueueMetrics.getQueueMetrics().put("q" + i, metrics);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 7bd465acadea7..b4c37c817f853 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -37,13 +37,13 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Callable;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
@@ -722,9 +722,9 @@ public void testValidateResourceBlacklistRequest() throws Exception {
         credentials.getAllTokens());
     currentUser.addToken(amRMToken);
     ApplicationMasterProtocol client =
-        currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+        currentUser.callAsNoException(new Callable<ApplicationMasterProtocol>() {
          @Override
-          public ApplicationMasterProtocol run() {
+          public ApplicationMasterProtocol call() {
            return (ApplicationMasterProtocol) rpc.getProxy(
                ApplicationMasterProtocol.class, rmBindAddress, yarnConf);
          }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index c77bb26de82cc..7965ec0b0983f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -81,7 +81,6 @@
 import static org.mockito.Mockito.when;
 
 import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -90,6 +89,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CyclicBarrier;
 
 import org.apache.hadoop.util.Sets;
@@ -111,6 +111,7 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -1024,9 +1025,9 @@ public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
         credentials.getAllTokens());
     currentUser.addToken(amRMToken);
     ApplicationMasterProtocol client =
-        currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+        currentUser.callAsNoException(new Callable<ApplicationMasterProtocol>() {
          @Override
-          public ApplicationMasterProtocol run() {
+          public ApplicationMasterProtocol call() {
            return (ApplicationMasterProtocol) rpc.getProxy(
                ApplicationMasterProtocol.class, rmBindAddress, conf);
          }
@@ -1064,7 +1065,7 @@ public ApplicationMasterProtocol run() {
     // grab the scheduler lock from another thread
     // and verify an allocate call in this thread doesn't block on it
     final CyclicBarrier barrier = new CyclicBarrier(2);
-    Thread otherThread = new Thread(new Runnable() {
+    Thread otherThread = new HadoopThread(new Runnable() {
       @Override
       public void run() {
         synchronized(cs) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java
index 706cdc9034cea..5958f79971392 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java
@@ -37,7 +37,7 @@ private MockPreemptionThread(FairScheduler scheduler) {
     }
 
     @Override
-    public void run() {
+    public void work() {
       while (!Thread.interrupted()) {
         try {
           FSAppAttempt app = context.getStarvedApps().take();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
index dc7a312b7e846..4da5704556875 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
@@ -32,9 +32,9 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.junit.jupiter.params.ParameterizedTest;
@@ -436,9 +436,9 @@ protected AMRMTokenSecretManager createAMRMTokenSecretManager(
   private ApplicationMasterProtocol createRMClient(final MockRM rm,
       final Configuration conf, final YarnRPC rpc,
       UserGroupInformation currentUser) {
-    return currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+    return currentUser.callAsNoException(new Callable<ApplicationMasterProtocol>() {
      @Override
-      public ApplicationMasterProtocol run() {
+      public ApplicationMasterProtocol call() {
        return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rm
            .getApplicationMasterService().getBindAddress(), conf);
      }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
index d5e2dd74d47e1..1e6c8205a3c1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
@@ -78,6 +78,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.Callable;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -240,10 +241,10 @@ protected void doSecureLogin() throws IOException {
     UserGroupInformation appUgi =
         UserGroupInformation.createRemoteUser(appAttempt.toString());
     RegisterApplicationMasterResponse response =
-        appUgi.doAs(new PrivilegedAction<RegisterApplicationMasterResponse>() {
+        appUgi.callAsNoException(new Callable<RegisterApplicationMasterResponse>() {
 
          @Override
-          public RegisterApplicationMasterResponse run() {
+          public RegisterApplicationMasterResponse call() {
            RegisterApplicationMasterResponse response = null;
            try {
              response = mockAM.registerAppAttempt();
@@ -347,9 +348,9 @@ private void verifyTamperedToken(final Configuration conf, final CustomAM am,
     ugi.addToken(maliciousToken);
 
     try {
-      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      ugi.callAs(new Callable<Void>() {
         @Override
-        public Void run() throws Exception {
+        public Void call() throws Exception {
           try {
             CustomProtocol client =
                 RPC.getProxy(CustomProtocol.class, 1L, am.address, conf);
@@ -391,9 +392,9 @@ private void verifyNewVersionToken(final Configuration conf, final CustomAM am,
     ugi.addToken(newToken);
 
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+    ugi.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         CustomProtocol client =
             RPC.getProxy(CustomProtocol.class, 1L, am.address, conf);
         client.ping(null, TestRpcBase.newEmptyRequest());
@@ -410,9 +411,9 @@ private void verifyValidToken(final Configuration conf, final CustomAM am,
     ugi = UserGroupInformation.createRemoteUser("me");
     ugi.addToken(token);
 
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+    ugi.callAs(new Callable<Void>() {
       @Override
-      public Void run() throws Exception {
+      public Void call() throws Exception {
         CustomProtocol client =
             RPC.getProxy(CustomProtocol.class, 1L, am.address, conf);
         client.ping(null, TestRpcBase.newEmptyRequest());
@@ -470,10 +471,10 @@ protected void doSecureLogin() throws IOException {
     UserGroupInformation appUgi =
         UserGroupInformation.createRemoteUser(appAttempt.toString());
     RegisterApplicationMasterResponse response =
-        appUgi.doAs(new PrivilegedAction<RegisterApplicationMasterResponse>() {
+        appUgi.callAsNoException(new Callable<RegisterApplicationMasterResponse>() {
 
          @Override
-          public RegisterApplicationMasterResponse run() {
+          public RegisterApplicationMasterResponse call() {
            RegisterApplicationMasterResponse response = null;
            try {
              response = mockAM.registerAppAttempt();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
index c813f477f9d88..bb47505d7df16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
@@ -415,7 +415,7 @@ public Void call() throws Exception {
     }
 
     // this should also fail due to client2 not being a superuser
-    KerberosTestUtils.doAs("client2@EXAMPLE.COM", new Callable<Void>() {
+    KerberosTestUtils.callAs("client2@EXAMPLE.COM", new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         String renewer = "renewer";
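KerberosTestUtils.doAs and doAsClient become callAs and callAsClient in these test hunks. The helper itself is not in the patch; a sketch consistent with the call sites, logging in a test principal and delegating to SubjectUtil.callAs, could look like this. The JAAS wiring is elided and the unwrapping behaviour is an assumption.

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionException;

import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;

import org.apache.hadoop.util.SubjectUtil;

/**
 * Hypothetical sketch of the renamed test helper. The real
 * KerberosTestUtils also builds a JAAS configuration around a test keytab;
 * that part is elided here.
 */
public final class CallAsSketch {

  public static <T> T callAs(String principal, Callable<T> callable)
      throws Exception {
    // Assumes a JAAS login entry exists per principal.
    LoginContext loginContext = new LoginContext(principal);
    loginContext.login();
    Subject subject = loginContext.getSubject();
    try {
      return SubjectUtil.callAs(subject, callable);
    } catch (CompletionException e) {
      // SubjectUtil.callAs wraps checked failures; rethrow the original.
      Throwable cause = e.getCause();
      if (cause instanceof Exception) {
        throw (Exception) cause;
      }
      throw e;
    } finally {
      loginContext.logout();
    }
  }

  private CallAsSketch() {
  }
}
```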
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
index 45b7cd3952579..3247412a98f50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
@@ -319,7 +319,7 @@ private void verifyKerberosAuthCreate(String mType, String cType,
     final String contentType = cType;
     final String body = reqBody;
     final String renewer = renUser;
-    KerberosTestUtils.doAsClient(new Callable<Void>() {
+    KerberosTestUtils.callAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -381,7 +381,7 @@ public void testRenewDelegationToken(int run) throws Exception {
 
     // test "client" and client2" trying to renew "client" token
     final DelegationToken responseToken =
-        KerberosTestUtils.doAsClient(new Callable<DelegationToken>() {
+        KerberosTestUtils.callAsClient(new Callable<DelegationToken>() {
          @Override
          public DelegationToken call() throws Exception {
            Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -407,7 +407,7 @@ public DelegationToken call() throws Exception {
          }
        });
 
-    KerberosTestUtils.doAs(renewer, new Callable<DelegationToken>() {
+    KerberosTestUtils.callAs(renewer, new Callable<DelegationToken>() {
       @Override
       public DelegationToken call() throws Exception {
         Principal principal1 = () -> "client2@EXAMPLE.COM";
@@ -449,7 +449,7 @@ public DelegationToken call() throws Exception {
     });
 
     // test unauthorized user renew attempt
-    KerberosTestUtils.doAs("client3", new Callable<DelegationToken>() {
+    KerberosTestUtils.callAs("client3", new Callable<DelegationToken>() {
       @Override
       public DelegationToken call() throws Exception {
         Principal principal1 = () -> "client3@EXAMPLE.COM";
@@ -469,7 +469,7 @@ public DelegationToken call() throws Exception {
 
     // test bad request - incorrect format, empty token string and random
     // token string
-    KerberosTestUtils.doAsClient(new Callable<Void>() {
+    KerberosTestUtils.callAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         String token = "TEST_TOKEN_STRING";
@@ -546,7 +546,7 @@ public void testCancelDelegationToken(int run) throws Exception {
     for (final String contentType : mediaTypes) {
       // owner should be able to cancel delegation token
-      KerberosTestUtils.doAsClient(new Callable<Void>() {
+      KerberosTestUtils.callAsClient(new Callable<Void>() {
         @Override
         public Void call() throws Exception {
           Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -572,7 +572,7 @@ public Void call() throws Exception {
 
       // renewer should be able to cancel token
       final DelegationToken tmpToken =
-          KerberosTestUtils.doAsClient(new Callable<DelegationToken>() {
+          KerberosTestUtils.callAsClient(new Callable<DelegationToken>() {
            @Override
            public DelegationToken call() throws Exception {
              Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -588,7 +588,7 @@ public DelegationToken call() throws Exception {
            }
          });
 
-      KerberosTestUtils.doAs(renewer, new Callable<Void>() {
+      KerberosTestUtils.callAs(renewer, new Callable<Void>() {
         @Override
         public Void call() throws Exception {
           Principal principal1 = () -> "client2@EXAMPLE.COM";
@@ -607,7 +607,7 @@ public Void call() throws Exception {
 
       // third user should not be able to cancel token
       final DelegationToken tmpToken2 =
-          KerberosTestUtils.doAsClient(new Callable<DelegationToken>() {
+          KerberosTestUtils.callAsClient(new Callable<DelegationToken>() {
            @Override
            public DelegationToken call() throws Exception {
              Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -623,7 +623,7 @@ public DelegationToken call() throws Exception {
            }
          });
 
-      KerberosTestUtils.doAs("client3", new Callable<Void>() {
+      KerberosTestUtils.callAs("client3", new Callable<Void>() {
         @Override
         public Void call() throws Exception {
           Principal principal1 = () -> "client3@EXAMPLE.COM";
@@ -659,7 +659,7 @@ private void testCancelTokenBadRequests(String mType, String cType)
     dtoken.setRenewer(renewer);
 
     // bad request(invalid header value)
-    KerberosTestUtils.doAsClient(new Callable<Void>() {
+    KerberosTestUtils.callAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -676,7 +676,7 @@ public Void call() throws Exception {
     });
 
     // bad request(missing header)
-    KerberosTestUtils.doAsClient(new Callable<Void>() {
+    KerberosTestUtils.callAsClient(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -692,7 +692,7 @@ public Void call() throws Exception {
 
     // bad request(cancelled token)
     final DelegationToken tmpToken =
-        KerberosTestUtils.doAsClient(new Callable<DelegationToken>() {
+        KerberosTestUtils.callAsClient(new Callable<DelegationToken>() {
          @Override
          public DelegationToken call() throws Exception {
            Principal principal1 = () -> "client@EXAMPLE.COM";
@@ -708,7 +708,7 @@ public DelegationToken call() throws Exception {
          }
        });
 
-    KerberosTestUtils.doAs(renewer, new Callable<Void>() {
+    KerberosTestUtils.callAs(renewer, new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         Principal principal1 = () -> "client2@EXAMPLE.COM";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
index 8f3c4d0fe577e..0fb77c2369a7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -202,7 +203,7 @@ protected void serviceStop() throws Exception {
   }
 
   protected void shutDown() {
-    new Thread(Router.this::stop).start();
+    new HadoopThread(Router.this::stop).start();
   }
 
   protected RouterClientRMService createClientRMProxyService() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
index 0c02fa1e8caae..13613a8fd2640 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
@@ -29,6 +29,7 @@
 import java.util.Map;
 
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
@@ -226,9 +227,9 @@ public void testClientPipelineConcurrent() throws InterruptedException {
    * ClientTestThread is a thread to simulate a client request to get a
    * ClientRequestInterceptor for the user.
    */
-  class ClientTestThread extends Thread {
+  class ClientTestThread extends HadoopThread {
     private ClientRequestInterceptor interceptor;
-    @Override public void run() {
+    @Override public void work() {
       try {
         interceptor = pipeline();
       } catch (IOException | InterruptedException e) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
index d4b02f4d951f3..15cdcad9f4f26 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
@@ -29,6 +29,7 @@
 import java.util.Map;
 
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
@@ -235,9 +236,9 @@ public void testRMAdminPipelineConcurrent() throws InterruptedException {
    * ClientTestThread is a thread to simulate a client request to get a
    * RMAdminRequestInterceptor for the user.
    */
-  class ClientTestThread extends Thread {
+  class ClientTestThread extends HadoopThread {
     private RMAdminRequestInterceptor interceptor;
-    @Override public void run() {
+    @Override public void work() {
       try {
         interceptor = pipeline();
       } catch (IOException | InterruptedException e) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
index ceb75e38a24a7..60f7bf8ac4a80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
@@ -31,6 +31,7 @@
 import javax.ws.rs.core.Response;
 
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
@@ -297,9 +298,9 @@ public void testWebPipelineConcurrent() throws InterruptedException {
    * ClientTestThread is a thread to simulate a client request to get a
    * RESTRequestInterceptor for the user.
   */
-  class ClientTestThread extends Thread {
+  class ClientTestThread extends HadoopThread {
     private RESTRequestInterceptor interceptor;
-    @Override public void run() {
+    @Override public void work() {
       try {
         interceptor = pipeline();
       } catch (IOException | InterruptedException e) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 8877105c56788..7e1a0a34e04b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -360,7 +360,7 @@ void testPutTimelineEntities(boolean withSsl, boolean withKerberosLogin) throws
         "test_flow_version" + File.separator + "1" + File.separator +
         appId.toString() + File.separator + entityType);
     if (withKerberosLogin) {
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+      KerberosTestUtils.callAs(HTTP_USER + "/localhost", new Callable<Void>() {
         @Override
         public Void call() throws Exception {
           publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java
index d8a7a56ac198b..1860bc1953ff7 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java
@@ -24,6 +24,7 @@
 import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Sets;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
 import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils;
 import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.NoDocumentFoundException;
@@ -244,7 +245,7 @@ public synchronized void close() {
   }
 
   private void addShutdownHook() {
-    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+    Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> {
       if (executorService != null) {
         executorService.shutdown();
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java
index 7cfb7f2fe415c..3e15671872c1a 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java
@@ -35,6 +35,7 @@
 import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopThread;
 import org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics;
 import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils;
 import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.CollectionType;
@@ -279,7 +280,7 @@ public synchronized void close() {
   }
 
   private void addShutdownHook() {
-    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+    Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> {
       if (executorService != null) {
         executorService.shutdown();
       }
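Shutdown hooks are plain threads, so the two Cosmos DB stores above can route them through HadoopThread as well. A self-contained rendering of the pattern, with a demo executor standing in for the stores' real one:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.util.concurrent.HadoopThread;

// Minimal, self-contained version of the shutdown-hook pattern used by the
// Cosmos DB reader/writer above. The executor here exists only for the demo.
public class ShutdownHookDemo {
  public static void main(String[] args) {
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> {
      if (executorService != null) {
        executorService.shutdown(); // release pooled threads on JVM exit
      }
    }));
    executorService.submit(() -> System.out.println("doing work"));
    executorService.shutdown();
  }
}
```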